From e8e8437c391a2d3b55bbeaf51537a2041c965b9b Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Wed, 3 Mar 2021 13:30:20 -0500 Subject: [PATCH 01/28] Default to all sources if no config file is present; remove use-active-sources hook Former-commit-id: 54adc42dce7187ebf51206d76ce9d024f5989e16 --- gatsby-config.js | 26 +++++++++++++++++--------- gatsby-node.js | 26 -------------------------- src/hooks/use-active-sources.js | 18 ------------------ 3 files changed, 17 insertions(+), 53 deletions(-) delete mode 100644 src/hooks/use-active-sources.js diff --git a/gatsby-config.js b/gatsby-config.js index 32987aabad5..36c2d8cdcdf 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -5,6 +5,7 @@ const utf8Truncate = require('truncate-utf8-bytes'); const gracefulFs = require('graceful-fs'); const ANSI_BLUE = '\033[34m'; +const ANSI_GREEN = '\033[32m'; const ANSI_STOP = '\033[0m'; const isBuild = process.env.NODE_ENV === 'production'; @@ -65,12 +66,23 @@ const sourceToPluginConfig = { const externalSourcePlugins = () => { const sourcePlugins = []; - if (!process.env.SKIP_SOURCING && gracefulFs.existsSync(sourceFilename)) { - console.log( - `${ANSI_BLUE}###### Sourcing from ${sourceFilename} #######${ANSI_STOP}`, - ); + if (!process.env.SKIP_SOURCING) { + // default to full set of sources + let sources = Object.keys(sourceToPluginConfig).reduce((result, source) => { + result[source] = true; + return result; + }, {}); + + if (gracefulFs.existsSync(sourceFilename)) { + console.log( + `${ANSI_BLUE}###### Sourcing from ${sourceFilename} #######${ANSI_STOP}`, + ); + console.log( + `${ANSI_GREEN}Note that ${sourceFilename} is no longer strictly required - the full set of docs will be loaded in its absence.${ANSI_STOP}`, + ); + sources = JSON.parse(gracefulFs.readFileSync(sourceFilename)); + } - const sources = JSON.parse(gracefulFs.readFileSync(sourceFilename)); for (const [source, enabled] of Object.entries(sources)) { const config = sourceToPluginConfig[source]; if (enabled && 
config) { @@ -83,10 +95,6 @@ const externalSourcePlugins = () => { }); } } - } else if (isBuild) { - console.error( - 'Configure sources with `yarn config-sources`. Defaulting to advocacy content only!', - ); } return sourcePlugins; diff --git a/gatsby-node.js b/gatsby-node.js index 938bf01bdc4..2e1ff8415df 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -354,32 +354,6 @@ exports.sourceNodes = async ({ createNodeId, createContentDigest, }) => { - // create edb-sources node - const activeSources = ['advocacy']; - - if (!process.env.SKIP_SOURCING) { - const sources = JSON.parse( - gracefulFs.readFileSync( - isBuild ? 'build-sources.json' : 'dev-sources.json', - ), - ); - for (const [source, enabled] of Object.entries(sources)) { - if (enabled) { - activeSources.push(source); - } - } - } - - const sourcesNodeData = { activeSources: activeSources }; - createNode({ - ...sourcesNodeData, - id: createNodeId('edb-sources'), - internal: { - type: 'edbSources', - contentDigest: createContentDigest(sourcesNodeData), - }, - }); - // create edb-git node const sha = ( await new Promise((resolve, reject) => { diff --git a/src/hooks/use-active-sources.js b/src/hooks/use-active-sources.js deleted file mode 100644 index 810353991a4..00000000000 --- a/src/hooks/use-active-sources.js +++ /dev/null @@ -1,18 +0,0 @@ -import { useStaticQuery, graphql } from 'gatsby'; - -const useActiveSources = () => { - const data = useStaticQuery(graphql` - { - edbSources { - activeSources - } - } - `); - - return data.edbSources.activeSources.reduce((obj, val) => { - obj[`${val}Active`] = true; - return obj; - }, {}); -}; - -export default useActiveSources; From e75334791da2e9a8629fa3711b4429a11cdc9d27 Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Wed, 3 Mar 2021 18:51:15 +0000 Subject: [PATCH 02/28] normalization Former-commit-id: 4947fee9562be51e857f78ce3693993bac9db6e4 --- .../8.0.1/pem_admin/02_registering_server.mdx | 48 ++-- 
.../03_pem_define_aws_instance_connection.mdx | 32 +-- .../pem_admin/06_managing_pem_server.mdx | 2 +- .../docs/pem/8.0.1/pem_admin/index.mdx | 2 +- .../8.0.1/pem_agent/03_managing_pem_agent.mdx | 1 - ..._performance_monitoring_and_management.mdx | 5 +- ...rerequisites_for_installing_pem_server.mdx | 18 +- .../07_registering_a_pem_agent.mdx | 40 +-- .../05_the_pem_web_interface.mdx | 2 +- .../01_pem_architecture.mdx | 7 +- .../02_pem_server_logon.mdx | 9 +- ...03_pem_managing_configuration_settings.mdx | 1 - .../04_pem_roles.mdx | 55 ++-- .../05_group_dialog.mdx | 1 - .../06_auto_discovery_dialog.mdx | 1 - .../07_pem_define_connection.mdx | 17 +- .../08_pem_define_aws_instance_connection.mdx | 40 ++- .../09_pem_connect_to_server.mdx | 3 +- .../10_control_server.mdx | 3 +- .../11_connect_error.mdx | 8 +- .../01_toc_pem_getting_started/index.mdx | 7 +- .../01_pem_agent_properties.mdx | 3 +- .../01_pem_agent_config_params.mdx | 77 +++--- .../02_pem_agent_binding/02_pem_agent_ha.mdx | 5 +- .../02_pem_agent_binding/index.mdx | 9 +- .../03_pem_agent_start_pem_agent.mdx | 1 - .../02_toc_pem_agent/04_pem_agent_ha.mdx | 5 +- .../05_pem_agent_privileges.mdx | 18 +- .../06_pem_agent_config_params.mdx | 77 +++--- .../07_pem_agent_self_registration.mdx | 24 +- .../08_pem_agent_server_registration.mdx | 50 ++-- .../02_toc_pem_agent/index.mdx | 5 +- .../01_pem_browser_window.mdx | 44 ++- .../03_toc_pem_client/02_pem_toolbar.mdx | 5 +- .../03_toc_pem_client/03_pem_menu_bar.mdx | 109 ++++---- .../03_toc_pem_client/04_preferences.mdx | 9 +- .../05_keyboard_shortcuts.mdx | 84 +++--- .../03_toc_pem_client/06_search_objects.mdx | 5 +- .../03_toc_pem_client/index.mdx | 5 +- .../01_dashboards/01_alerts_dashboard.mdx | 5 +- .../01_dashboards/02_audit_log_dashboard.mdx | 3 +- .../03_database_analysis_dashboard.mdx | 3 +- .../04_global_overview_dashboard.mdx | 5 +- .../05_io_analysis_dashboard.mdx | 5 +- .../06_memory_analysis_dashboard.mdx | 3 +- 
.../07_object_activity_analysis_dashboard.mdx | 3 +- .../08_os_analysis_dashboard.mdx | 3 +- .../09_probe_log_analysis_dashboard.mdx | 3 +- .../10_server_analysis_dashboard.mdx | 3 +- .../11_server_log_analysis_dashboard.mdx | 1 - ...12_session_activity_analysis_dashboard.mdx | 5 +- .../13_session_waits_dashboard.mdx | 7 +- .../14_storage_analysis_dashboard.mdx | 3 +- .../15_system_wait_dashboard.mdx | 7 +- .../16_str_replication_dashboard.mdx | 3 +- .../01_dashboards/index.mdx | 37 ++- .../01_pem_config_options.mdx | 139 +++++++++- .../02_pem_server_config/index.mdx | 5 +- .../04_toc_pem_features/03_log_manager.mdx | 24 +- .../04_toc_pem_features/04_audit_manager.mdx | 7 +- .../05_pem_log_analysis_expert.mdx | 3 +- .../04_toc_pem_features/06_tuning_wizard.mdx | 7 +- ...e_schema_config_expert_recommendations.mdx | 37 ++- .../02_pe_security_expert_recommendations.mdx | 57 ++-- ...e_configuration_expert_recommendations.mdx | 261 +++++++++--------- .../07_pem_postgres_expert/index.mdx | 17 +- .../01_capacity_manager_metrics.mdx | 3 +- .../02_capacity_manager_options.mdx | 5 +- .../08_capacity_manager/index.mdx | 248 ++++++++--------- .../01_pem_alerting_dialog.mdx | 33 ++- .../09_pem_alerting/02_pem_alert_copy.mdx | 1 - .../03_pem_alert_templates.mdx | 153 +++++----- .../04_pem_custom_alert_templates.mdx | 24 +- .../09_pem_alerting/05_pem_email_groups.mdx | 1 - .../09_pem_alerting/06_pem_webhooks.mdx | 7 +- .../07_snmp_mib_generation.mdx | 5 +- .../09_pem_alerting/08_snmp_trap_details.mdx | 41 +-- .../09_using_pem_with_nagios.mdx | 13 +- .../09_pem_alerting/index.mdx | 21 +- .../01_pem_create_new_chart.mdx | 1 - .../02_pem_manage_charts_template.mdx | 1 - .../10_pem_manage_charts/index.mdx | 7 +- .../01_pem_custom_dashboard.mdx | 3 +- .../02_pem_ops_dashboard.mdx | 3 +- .../11_pem_manage_dashboards/index.mdx | 3 +- .../01_pem_custom_probes.mdx | 1 - .../02_copy_probe_config.mdx | 1 - .../03_pem_probe_config/01_pem_probes.mdx | 54 +++- .../03_pem_probe_config/index.mdx 
| 9 +- .../12_pem_manage_probes/index.mdx | 9 +- .../13_pem_alert_blackout.mdx | 1 - .../14_pem_scheduled_system_jobs.mdx | 1 - .../15_pem_scheduled_task_tab.mdx | 4 +- .../16_pem_scheduled_jobs.mdx | 5 +- .../17_pem_job_notification.mdx | 1 - .../18_pem_task_view/01_pem_log_view.mdx | 3 +- .../18_pem_task_view/index.mdx | 8 +- ..._monitoring_a_failover_manager_cluster.mdx | 11 +- .../20_performance_diagnostic.mdx | 3 +- .../04_toc_pem_features/21_reports.mdx | 3 +- .../04_toc_pem_features/index.mdx | 9 +- .../01_grant_wizard.mdx | 1 - .../02_add_restore_point_dialog.mdx | 1 - .../03_import_export_data.mdx | 3 +- .../04_maintenance/01_maintenance_dialog.mdx | 1 - .../04_maintenance/index.mdx | 3 +- .../05_storage_manager.mdx | 3 +- .../06_backup_dialog.mdx | 7 +- .../07_backup_globals_dialog.mdx | 3 +- .../08_backup_server_dialog.mdx | 3 +- .../09_restore_dialog.mdx | 9 +- .../01_database_dialog.mdx | 3 +- .../02_move_objects.mdx | 1 - .../03_resource_group_dialog.mdx | 1 - .../04_role_dialog.mdx | 3 +- .../05_tablespace_dialog.mdx | 3 +- .../10_managing_cluster_objects/index.mdx | 3 +- .../01_cast_dialog.mdx | 1 - .../02_collation_dialog.mdx | 1 - .../03_domain_dialog.mdx | 1 - .../04_domain_constraint_dialog.mdx | 1 - .../05_event_trigger_dialog.mdx | 3 +- .../06_extension_dialog.mdx | 1 - .../07_foreign_data_wrapper_dialog.mdx | 1 - .../08_foreign_server_dialog.mdx | 1 - .../09_foreign_table_dialog.mdx | 1 - .../10_fts_configuration_dialog.mdx | 1 - .../11_fts_dictionary_dialog.mdx | 1 - .../12_fts_parser_dialog.mdx | 1 - .../13_fts_template_dialog.mdx | 1 - .../14_function_dialog.mdx | 5 +- .../15_language_dialog.mdx | 1 - .../16_materialized_view_dialog.mdx | 5 +- .../17_package_dialog.mdx | 1 - .../18_procedure_dialog.mdx | 3 +- .../19_schema_dialog.mdx | 1 - .../20_sequence_dialog.mdx | 1 - .../21_synonym_dialog.mdx | 1 - .../22_trigger_function_dialog.mdx | 5 +- .../23_type_dialog.mdx | 15 +- .../24_user_mapping_dialog.mdx | 1 - .../25_view_dialog.mdx | 1 - 
.../11_managing_database_objects/index.mdx | 3 +- .../12_modifying_tables/01_check_dialog.mdx | 1 - .../12_modifying_tables/02_column_dialog.mdx | 1 - .../03_compound_trigger_dialog.mdx | 1 - .../04_exclusion_constraint_dialog.mdx | 1 - .../05_foreign_key_dialog.mdx | 3 +- .../12_modifying_tables/06_index_dialog.mdx | 1 - .../07_primary_key_dialog.mdx | 1 - .../08_rls_policy_dialog.mdx | 1 - .../12_modifying_tables/09_rule_dialog.mdx | 1 - .../12_modifying_tables/10_table_dialog.mdx | 13 +- .../12_modifying_tables/11_trigger_dialog.mdx | 1 - .../12_unique_constraint_dialog.mdx | 1 - .../12_modifying_tables/index.mdx | 3 +- .../05_toc_pem_management_basics/index.mdx | 9 +- .../01_managing_bart_prerequisites.mdx | 3 +- .../02_configuring_bart_server.mdx | 3 +- ...ating_bart_server_with_database_server.mdx | 7 +- .../04_viewing_bart_dashboard.mdx | 1 - .../05_scheduling_bart_backups.mdx | 3 +- ...eduling_bart_obsolete_backups_deletion.mdx | 1 - .../07_bart_backup_dialog.mdx | 3 +- .../08_restoring_bart_backups.mdx | 1 - .../06_toc_pem_bart_management/index.mdx | 9 +- .../01_sp_installing_sql_profiler.mdx | 1 - .../02_sp_configuring_sql_profiler.mdx | 1 - .../03_sp_create_new_trace.mdx | 3 +- .../04_sp_index_advisor.mdx | 5 +- .../05_sp_sql_profiler_tab.mdx | 13 +- .../07_toc_pem_sql_profiler/index.mdx | 5 +- .../01_debugger.mdx | 12 +- .../02_query_tool.mdx | 37 ++- .../03_pem_interpreting_graphical_query.mdx | 5 +- .../04_editgrid/01_viewdata_filter.mdx | 3 +- .../04_editgrid/index.mdx | 23 +- .../05_schema_diff.mdx | 10 +- .../08_toc_pem_developer_tools/index.mdx | 3 +- ..._pem_pgbouncer_server_agent_connection.mdx | 3 +- .../02_pem_pgbouncer_preparing_dbserver.mdx | 61 ++-- ...03_pem_pgbouncer_configuring_pgbouncer.mdx | 165 +++++------ ...04_pem_pgbouncer_configuring_pem_agent.mdx | 89 +++--- .../09_toc_pem_configure_pgbouncer/index.mdx | 3 +- .../10_pgagent/01_using_pgagent.mdx | 1 - .../10_pgagent/02_pgagent_install.mdx | 61 ++-- 
.../10_pgagent/03_pgagent_jobs.mdx | 8 +- .../10_pgagent/04_pgagent-steps.mdx | 3 +- .../10_pgagent/05_pgagent-schedules.mdx | 1 - .../pem_online_help/10_pgagent/index.mdx | 3 +- .../11_appendices/01_licence.mdx | 1 - .../11_appendices/02_kerberos.mdx | 1 - .../11_appendices/03_openssl.mdx | 3 +- .../11_appendices/04_snmp++.mdx | 1 - .../11_appendices/05_jquery_table_sort.mdx | 5 +- .../pem_online_help/11_appendices/index.mdx | 5 +- .../01_pem_release_notes_8_0_1.mdx | 102 ++++--- .../02_pem_release_notes_8_0.mdx | 110 ++++---- .../03_pem_release_notes_7_16.mdx | 182 ++++++------ .../04_pem_release_notes_7_15.mdx | 128 +++++---- .../05_pem_release_notes_7_14.mdx | 184 ++++++------ .../06_pem_release_notes_7_13.mdx | 110 ++++---- .../07_pem_release_notes_7_12.mdx | 124 ++++----- .../08_pem_release_notes_7_11.mdx | 26 +- .../09_pem_release_notes_7_10.mdx | 184 ++++++------ .../10_pem_release_notes_7_9.mdx | 138 +++++---- .../11_pem_release_notes_7_8.mdx | 134 +++++---- .../12_pem_release_notes_7_7_1.mdx | 6 +- .../13_pem_release_notes_7_7.mdx | 196 +++++++------ .../14_pem_release_notes_7_6.mdx | 76 +++-- .../15_pem_release_notes_7_5.mdx | 62 ++--- .../12_release_notes/index.mdx | 4 +- .../docs/pem/8.0.1/pem_online_help/index.mdx | 4 +- ...ading_pem_installation_linux_graphical.mdx | 8 +- .../02_upgrading_backend_database.mdx | 148 +++++----- .../pem_upgrade/03_moving_pem_server.mdx | 170 ++++++------ 216 files changed, 2584 insertions(+), 2411 deletions(-) diff --git a/product_docs/docs/pem/8.0.1/pem_admin/02_registering_server.mdx b/product_docs/docs/pem/8.0.1/pem_admin/02_registering_server.mdx index fca34dd8dff..c7830d7cb70 100644 --- a/product_docs/docs/pem/8.0.1/pem_admin/02_registering_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_admin/02_registering_server.mdx @@ -206,25 +206,25 @@ On a Windows host, use the command: Append command line options to the command string when invoking the `pemworker` utility. 
Each option should be followed by a corresponding value: -| Option | Description | -| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `--pem-user` | Specifies the name of the PEM administrative user. Required. | -| `--server-addr` | Specifies the IP address of the server host, or the fully qualified domain name. On Unix based systems, the address field may be left blank to use the default PostgreSQL Unix Domain Socket on the local machine, or may be set to an alternate path containing a PostgreSQL socket. If you enter a path, the path must begin with a /. Required. | -| `--server-port` | Specifies the port number of the host. Required. | -| `--server-database` | Specifies the name of the database to which the server will connect. Required. | -| `--server-user` | Specify the name of the user that will be used by the agent when monitoring the server. Required. | -| `--server-service-name` | Specifies the name of the database service that controls operations on the server that is being registered (STOP, START, RESTART, etc.). Optional. | -| `--remote-monitoring` | Include the --remote-monitoring clause and a value of no (the default) to indicate that the server is installed on the same machine as the PEM agent. When remote monitoring is enabled (yes), agent level statistics for the monitored server will not be available for custom charts and dashboards, and the remote server will not be accessible by some PEM utilities (such as Audit Manager, Capacity Manager, Log Manager, Postgres Expert and Tuning Wizard). Required. 
| -| `--efm-cluster-name` | Specifies the name of the Failover Manager cluster that monitors the server (if applicable). Optional. | -| `--efm-install-path` | Specifies the complete path to the installation directory of Failover Manager (if applicable). Optional. | -| `--asb-host-name` | Specifies the name of the host to which the agent is connecting. | -| `--asb-host-port` | Specifies the port number that the agent will use when connecting to the database. | -| `--asb-host-db` | Specifies the name of the database to which the agent will connect. | -| `--asb-host-user` | Specifies the database user name that the agent will supply when authenticating with the database. | -| `--asb-ssl-mode` | Specifies the type of SSL authentication that will be used for connections. Supported values include: prefer, require, disable, verify-CA, verify-full. | -| `--group` | Specifies the name of the group in which the server will be displayed. | -| `--team` | Specifies the name of the group role that will be allowed to access the server. | -| `--owner` | Specifies the name of the role that will own the monitored server. | +| Option | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `--pem-user` | Specifies the name of the PEM administrative user. Required. | +| `--server-addr` | Specifies the IP address of the server host, or the fully qualified domain name. 
On Unix based systems, the address field may be left blank to use the default PostgreSQL Unix Domain Socket on the local machine, or may be set to an alternate path containing a PostgreSQL socket. If you enter a path, the path must begin with a /. Required. | +| `--server-port` | Specifies the port number of the host. Required. | +| `--server-database` | Specifies the name of the database to which the server will connect. Required. | +| `--server-user` | Specify the name of the user that will be used by the agent when monitoring the server. Required. | +| `--server-service-name` | Specifies the name of the database service that controls operations on the server that is being registered (STOP, START, RESTART, etc.). Optional. | +| `--remote-monitoring` | Include the --remote-monitoring clause and a value of no (the default) to indicate that the server is installed on the same machine as the PEM agent. When remote monitoring is enabled (yes), agent level statistics for the monitored server will not be available for custom charts and dashboards, and the remote server will not be accessible by some PEM utilities (such as Audit Manager, Capacity Manager, Log Manager, Postgres Expert and Tuning Wizard). Required. | +| `--efm-cluster-name` | Specifies the name of the Failover Manager cluster that monitors the server (if applicable). Optional. | +| `--efm-install-path` | Specifies the complete path to the installation directory of Failover Manager (if applicable). Optional. | +| `--asb-host-name` | Specifies the name of the host to which the agent is connecting. | +| `--asb-host-port` | Specifies the port number that the agent will use when connecting to the database. | +| `--asb-host-db` | Specifies the name of the database to which the agent will connect. | +| `--asb-host-user` | Specifies the database user name that the agent will supply when authenticating with the database. 
| +| `--asb-ssl-mode` | Specifies the type of SSL authentication that will be used for connections. Supported values include: prefer, require, disable, verify-CA, verify-full. | +| `--group` | Specifies the name of the group in which the server will be displayed. | +| `--team` | Specifies the name of the group role that will be allowed to access the server. | +| `--owner` | Specifies the name of the role that will own the monitored server. | Set the environment variable `PEM_SERVER_PASSWORD` to provide the password for the PEM server to allow the pemworker to connect as a PEM admin user. @@ -246,11 +246,11 @@ On a Windows host, use the command: Append command line options to the command string when invoking the `pemworker` utility. Each option should be followed by a corresponding value: -| Option | Description | -| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `--pem-user` | Specifies the name of the PEM administrative user. Required. | -| `--server-addr` | Specifies the IP address of the server host, or the fully qualified domain name. On Unix based systems, the address field may be left blank to use the default PostgreSQL Unix Domain Socket on the local machine, or may be set to an alternate path containing a PostgreSQL socket. If you enter a path, the path must begin with a /. Required. | -| `--server-port` | Specifies the port number of the host. Required. 
| +| Option | Description | +| --------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `--pem-user` | Specifies the name of the PEM administrative user. Required. | +| `--server-addr` | Specifies the IP address of the server host, or the fully qualified domain name. On Unix based systems, the address field may be left blank to use the default PostgreSQL Unix Domain Socket on the local machine, or may be set to an alternate path containing a PostgreSQL socket. If you enter a path, the path must begin with a /. Required. | +| `--server-port` | Specifies the port number of the host. Required. | Use the `PEM_SERVER_PASSWORD` environment variable to provide the password for the PEM server to allow the pemworker to connect as a PEM admin user. 
diff --git a/product_docs/docs/pem/8.0.1/pem_admin/03_pem_define_aws_instance_connection.mdx b/product_docs/docs/pem/8.0.1/pem_admin/03_pem_define_aws_instance_connection.mdx index a3bcbdb5418..a860158d8bf 100644 --- a/product_docs/docs/pem/8.0.1/pem_admin/03_pem_define_aws_instance_connection.mdx +++ b/product_docs/docs/pem/8.0.1/pem_admin/03_pem_define_aws_instance_connection.mdx @@ -36,19 +36,19 @@ The `PEM Agent` tab in the `Create - Server` dialog must have the `Remote Monito As the PEM Agent will be monitoring the Postgres(RDS) AWS instance remotely, the functionality will be limited as described below: -| Feature Name | Works with remote PEM Agent | Comments | -| ---------------------------- | ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Audit Manager | No | | -| Capacity Manager | Limited | There will be no correlation between the database server and operating system metrices. | -| Log Manager | No | | -| Manage Alerts | Limited | When you run an alert script on the database server, it will run on the machine where the bound PEM Agent is running, and not on the actual database server machine. | -| Manage Charts | Yes | | -| Manage Dashboards | Limited | Some dashboards may not be able to show complete data. For example, the operating system information of the database server will not be displayed as it is not available. | -| Manage Probes | Limited | Some of the PEM probes will not return information, and some of the functionalities may be affected. For details about probe functionality, see the [PEM Agent Guide](../pem_agent/). | -| Postgres Expert | Limited | The Postgres Expert will provide partial information as operating system information is not available. 
| -| Postgres Log Analysis Expert | No | The Postgres Log Analysis Expert will not be able to perform an analysis as it is dependent on the logs imported by log manager, which will not work as required. | -| Scheduled Tasks | Limited | Scheduled tasks will work only for database server; scripts will run on a remote Agent. | -| Tuning Wizard | No | | -| System Reports | Yes | | -| Core Usage Reports | Limited | The Core Usage report will not show complete information. For example, the platform, number of cores, and total RAM will not be displayed. | -| Managing BART | No | BART requires password less authentication between two machines, where database server and BART are installed. An AWS RDS instance doesn't allow to use host access. | +| Feature Name | Works with remote PEM Agent | Comments | +| ---------------------------- | --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Audit Manager | No | | +| Capacity Manager | Limited | There will be no correlation between the database server and operating system metrices. | +| Log Manager | No | | +| Manage Alerts | Limited | When you run an alert script on the database server, it will run on the machine where the bound PEM Agent is running, and not on the actual database server machine. | +| Manage Charts | Yes | | +| Manage Dashboards | Limited | Some dashboards may not be able to show complete data. For example, the operating system information of the database server will not be displayed as it is not available. | +| Manage Probes | Limited | Some of the PEM probes will not return information, and some of the functionalities may be affected. For details about probe functionality, see the [PEM Agent Guide](../pem_agent/). | +| Postgres Expert | Limited | The Postgres Expert will provide partial information as operating system information is not available. 
| +| Postgres Log Analysis Expert | No | The Postgres Log Analysis Expert will not be able to perform an analysis as it is dependent on the logs imported by log manager, which will not work as required. | +| Scheduled Tasks | Limited | Scheduled tasks will work only for database server; scripts will run on a remote Agent. | +| Tuning Wizard | No | | +| System Reports | Yes | | +| Core Usage Reports | Limited | The Core Usage report will not show complete information. For example, the platform, number of cores, and total RAM will not be displayed. | +| Managing BART | No | BART requires password less authentication between two machines, where database server and BART are installed. An AWS RDS instance doesn't allow to use host access. | diff --git a/product_docs/docs/pem/8.0.1/pem_admin/06_managing_pem_server.mdx b/product_docs/docs/pem/8.0.1/pem_admin/06_managing_pem_server.mdx index 47631d709f7..c5292574c88 100644 --- a/product_docs/docs/pem/8.0.1/pem_admin/06_managing_pem_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_admin/06_managing_pem_server.mdx @@ -30,7 +30,7 @@ PEM allows you to startup and shutdown managed server instances with the PEM cli - For Advanced Server, the service name is `edb-as-` or `ppas-`. - For PostgreSQL, the service name is `postgresql-`. - + Where `x` indicates the server version number. After connecting to the server, you can start or stop the server by highlighting the server name in the tree control, and selecting `Queue Server Startup` or `Queue Server Shutdown` from the `Tools` menu. 
diff --git a/product_docs/docs/pem/8.0.1/pem_admin/index.mdx b/product_docs/docs/pem/8.0.1/pem_admin/index.mdx index f4e06e5377f..46b9c2a27f5 100644 --- a/product_docs/docs/pem/8.0.1/pem_admin/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_admin/index.mdx @@ -17,4 +17,4 @@ This document uses *Postgres* to mean either the PostgreSQL or EDB Postgres Adva overview registering_server pem_define_aws_instance_connection managing_certificates managing_configuration_settings managing_pem_server managing_pem_agent conclusion - \ No newline at end of file + diff --git a/product_docs/docs/pem/8.0.1/pem_agent/03_managing_pem_agent.mdx b/product_docs/docs/pem/8.0.1/pem_agent/03_managing_pem_agent.mdx index 81333c4e713..1a443da3b80 100644 --- a/product_docs/docs/pem/8.0.1/pem_agent/03_managing_pem_agent.mdx +++ b/product_docs/docs/pem/8.0.1/pem_agent/03_managing_pem_agent.mdx @@ -132,7 +132,6 @@ The registry contains the following entries: | WebhookSSLCaCrt | The complete path to the webhook's SSL ca certificate file. | | | AllowInsecureWebhooks | When set to true, allow webhooks to call with insecure flag. | false | - ## Agent Properties The PEM Agent `Properties` dialog provides information about the PEM agent from which the dialog was opened; to open the dialog, right-click on an agent name in the PEM client tree control, and select `Properties` from the context menu. 
diff --git a/product_docs/docs/pem/8.0.1/pem_ent_feat/04_performance_monitoring_and_management.mdx b/product_docs/docs/pem/8.0.1/pem_ent_feat/04_performance_monitoring_and_management.mdx index 69a6efbfcb3..4a17f918b82 100644 --- a/product_docs/docs/pem/8.0.1/pem_ent_feat/04_performance_monitoring_and_management.mdx +++ b/product_docs/docs/pem/8.0.1/pem_ent_feat/04_performance_monitoring_and_management.mdx @@ -761,7 +761,7 @@ PEM can send a notification or execute a script if an alert is triggered, or if ![The alert editor Notification tab](../images/alerting_define_notification.png) -Use the fields in the `Email notification` box to specify the Email group that will receive an email notification if the alert is triggered at the specified level. Use the [Email Groups](#Creating-an-Email-Group) tab to create an email group that contains the address of the user or users that will be notified when an alert is triggered. To access the `Email Groups` tab, click the `Email Groups` icon located in the `Quick Links` menu of the `Manage Alerts` tab. +Use the fields in the `Email notification` box to specify the Email group that will receive an email notification if the alert is triggered at the specified level. Use the [Email Groups](#creating-an-email-group) tab to create an email group that contains the address of the user or users that will be notified when an alert is triggered. To access the `Email Groups` tab, click the `Email Groups` icon located in the `Quick Links` menu of the `Manage Alerts` tab. - To instruct PEM to send an email when a specific alert level is reached, set the slider next to an alert level to Yes, and use the drop-down listbox to select the pre-defined user or group that will be notified. @@ -941,7 +941,6 @@ Select a server for which you wish to delete the scheduled alert backout and the You can use the `Reset` button to reset the details on the `Alert Blackout` dialog to the default settings. 
Please note that all saved blackouts will remain unaffected after resetting the current dialog values. - ## Notifications PEM can send a notification or execute a script if an alert is triggered, or if an alert is cleared. The Notifications can be send via following options: @@ -951,7 +950,6 @@ PEM can send a notification or execute a script if an alert is triggered, or if - SNMP - Nagios - Use the `Notification` tab to specify how PEM will behave if an alert is raised. ### SMTP @@ -962,7 +960,6 @@ Please note that you must configure the PEM Server to use an SMTP server to deli Postgres Enterprise Manager monitors your system for conditions that require user attention. You can use an email group to specify the email addresses of users that the server will notify if current values deviate from threshold values specified in an alert definition. An email group has the flexibility to notify multiple users, or target specific users during user-defined time periods. - Use the `Email Groups` tab to configure groups of SMTP email recipients. To access the `Email Groups` tab, select `Manage Alerts...` from the PEM client's `Management` menu; when the `Manage Alerts` tab opens, select `Email Groups` from the Quick Links toolbar. 
![The Email Groups tab](../images/email_groups_tab.png) diff --git a/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/01_prerequisites_for_installing_pem_server.mdx b/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/01_prerequisites_for_installing_pem_server.mdx index 730d3e4c0f4..c9072287fd7 100644 --- a/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/01_prerequisites_for_installing_pem_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/01_prerequisites_for_installing_pem_server.mdx @@ -7,7 +7,7 @@ title: "Prerequisites for Installing the PEM Server on Linux Platforms" When installing a PEM server on a Linux host, you must first install a backend database cluster which will hold the `pem` database. The PEM server’s backend database may be installed via package for Linux. The backend database must be one of the following versions: - EDB Postgres Advanced Server version 11 or above - + - PostgreSQL version 11 or above For detailed information about installing an Advanced Server or PostgreSQL database, please see the product documentation at the EDB website. @@ -25,23 +25,24 @@ title: "Prerequisites for Installing the PEM Server on Linux Platforms" Where, `x` is the server version. 4. Ensure that the `sslutils` extension is installed. - + - On an Advanced Server backend database, the `sslutils` extension is installed by default. - + - If you are using a PostgreSQL backend database, ensure you have access to the PostgreSQL community repository, and use the command: - + ```text yum install sslutils_ ``` - + Where, `x` is the server version. - + Please note that Debian 10 and Ubuntu 20 has increased the requirements to accept the certificates due to security reason. 
If a user wants to install the PEM Agent on any of the machines, they must upgrade `ssltuils` to 1.3 where 4096 bit RSA key and sha256 signature algorithm support has added.If the user does not upgrade `sslutils` to 1.3, then PEM Agent may fail to connect to the PEM backend database server, and it might log the error `ca md too weak`. + 5. Adjust your firewall restrictions. - + If you are using a firewall, you must allow access to port `8443` on the PEM backend database: - + ```text firewall-cmd --permanent --zone=public --add-port=8443/tcp @@ -51,6 +52,7 @@ title: "Prerequisites for Installing the PEM Server on Linux Platforms" 6. Request credentials that allow you to access the EDB repositories: To install the PEM Server, you must have credentials that allow access to the EDB repository. To request credentials for the repository, contact [EDB](https://www.enterprisedb.com/repository-access-request) . When using commands in the sections that follow, replace the `username` and `password` placeholders with the credentials provided by EDB + 7. PEM is dependent on third-party components from the vendor repository, including the python3, libboost, openssl, snmp++, libcurl, etc. To ensure these components are up to date, you should update your operating system using following platform-specific commands. Minimum version require for openssl is 1.0.2k. 
To upgrade packages on a CentOS or RHEL 7.x host diff --git a/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/07_registering_a_pem_agent.mdx b/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/07_registering_a_pem_agent.mdx index abc0aac628f..639e96a9151 100644 --- a/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/07_registering_a_pem_agent.mdx +++ b/product_docs/docs/pem/8.0.1/pem_inst_guide_linux/04_installing_postgres_enterprise_manager/07_registering_a_pem_agent.mdx @@ -12,26 +12,26 @@ pemworker --register-agent Append command line options to the command string when invoking the pemworker utility. Each option should be followed by a corresponding value: -| Option | Description | -| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `--pem-server` | Specifies the IP address of the PEM backend database server. This parameter is required. | -| `--pem-port` | Specifies the port of the PEM backend database server. The default value is 5432. | -| `--pem-user` | Specifies the name of the Database user (having superuser privileges) of the PEM backend database server. This parameter is required. | -| `--pem-agent-user` | Specifies the Agent user to connect the PEM server backend database server. | -| `--cert-path` | Specifies the complete path to the directory in which certificates will be created. If you do not provide a path, certificates will be created in: On Linux, ~/.pem On Windows, %APPDATA%/pem | -| `--config-dir` | Specifies the directory path where configuration file can be found. The default is the <pemworker path>/../etc. 
| -| `--display-name` | Specifies a user-friendly name for the Agent that will be displayed in the PEM Browser tree control. The default is the system hostname. | -| `--force-registration` | Include the force_registration clause to instruct the PEM server to register the Agent with the arguments provided; this clause is useful if you are overriding an existing Agent configuration. The default value is Yes. | -| `--group` | The name of the group in which the Agent will be displayed. | -| `--team` | The name of the database role, on the PEM backend database server, that should have access to the monitored database server. | -| `--owner` | The name of the database user, on the PEM backend database server, who will own the Agent. | -| `--allow_server_restart` | Enable the allow-server_restart parameter to allow PEM to restart the monitored server. The default value is True. | -| `--allow-batch-probes` | Enable the allow-batch-probes parameter to allow PEM to run batch probes on this Agent. The default value is False. | -| `--batch-script-user` | Specifies the operating system user that should be used for executing the batch/shell scripts. The default value is none; the scripts will not be executed if you leave this parameter blank or the specified user does not exist. | -| `--enable-heartbeat-connection` | Enable the enable-heartbeat-connection parameter to create a dedicated heartbeat connection between PEM Agent and server to update the active status. The default value is False. | -| `--enable-smtp` | When set to true for multiple PEM Agents (7.13 or lesser) it may send more duplicate emails. Whereas for PEM Agents (7.14 or higher) it may send lesser duplicate emails. | -| `--enable-snmp` | When set to true for multiple PEM Agents (7.13 or lesser) it may send more duplicate traps. Whereas for PEM Agents (7.14 or higher) it may send lesser duplicate traps. | -| `-o` | Specify if you want to override the configuration file options. 
| +| Option | Description | +| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `--pem-server` | Specifies the IP address of the PEM backend database server. This parameter is required. | +| `--pem-port` | Specifies the port of the PEM backend database server. The default value is 5432. | +| `--pem-user` | Specifies the name of the Database user (having superuser privileges) of the PEM backend database server. This parameter is required. | +| `--pem-agent-user` | Specifies the Agent user to connect the PEM server backend database server. | +| `--cert-path` | Specifies the complete path to the directory in which certificates will be created. If you do not provide a path, certificates will be created in: On Linux, ~/.pem On Windows, %APPDATA%/pem | +| `--config-dir` | Specifies the directory path where configuration file can be found. The default is the <pemworker path>/../etc. | +| `--display-name` | Specifies a user-friendly name for the Agent that will be displayed in the PEM Browser tree control. The default is the system hostname. | +| `--force-registration` | Include the force_registration clause to instruct the PEM server to register the Agent with the arguments provided; this clause is useful if you are overriding an existing Agent configuration. The default value is Yes. | +| `--group` | The name of the group in which the Agent will be displayed. | +| `--team` | The name of the database role, on the PEM backend database server, that should have access to the monitored database server. | +| `--owner` | The name of the database user, on the PEM backend database server, who will own the Agent. | +| `--allow_server_restart` | Enable the allow-server_restart parameter to allow PEM to restart the monitored server. The default value is True. 
| +| `--allow-batch-probes` | Enable the allow-batch-probes parameter to allow PEM to run batch probes on this Agent. The default value is False. | +| `--batch-script-user` | Specifies the operating system user that should be used for executing the batch/shell scripts. The default value is none; the scripts will not be executed if you leave this parameter blank or the specified user does not exist. | +| `--enable-heartbeat-connection` | Enable the enable-heartbeat-connection parameter to create a dedicated heartbeat connection between PEM Agent and server to update the active status. The default value is False. | +| `--enable-smtp` | When set to true for multiple PEM Agents (7.13 or lesser) it may send more duplicate emails. Whereas for PEM Agents (7.14 or higher) it may send lesser duplicate emails. | +| `--enable-snmp` | When set to true for multiple PEM Agents (7.13 or lesser) it may send more duplicate traps. Whereas for PEM Agents (7.14 or higher) it may send lesser duplicate traps. | +| `-o` | Specify if you want to override the configuration file options. | If you want to use any PEM feature for which a database server `restart` is required by the pemagent (such as Audit Manager, Log Manager, or the Tuning Wizard), then you must set the value for `allow_server_restart` to `true` in the `agent.cfg` file. diff --git a/product_docs/docs/pem/8.0.1/pem_inst_guide_windows/05_the_pem_web_interface.mdx b/product_docs/docs/pem/8.0.1/pem_inst_guide_windows/05_the_pem_web_interface.mdx index fcddad6879f..3c196c9a37b 100644 --- a/product_docs/docs/pem/8.0.1/pem_inst_guide_windows/05_the_pem_web_interface.mdx +++ b/product_docs/docs/pem/8.0.1/pem_inst_guide_windows/05_the_pem_web_interface.mdx @@ -36,4 +36,4 @@ To access online help information about the PEM web interface, select `Help` fro - The `PEM Enterprise Features Guide` contains information about using the tools and wizards that are part of the web interface. 
- The `PEM Agent User Guide` contains helpful information about managing your PEM Agents. - The `PEM Upgrade and Migration Guide` contains information about upgrading PEM to its latest version from a previous version. -- The `PEM PgBouncer Configuration Guide` contains information about using PgBouncer with your PEM installation. \ No newline at end of file +- The `PEM PgBouncer Configuration Guide` contains information about using PgBouncer with your PEM installation. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/01_pem_architecture.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/01_pem_architecture.mdx index 39d4bf91e18..77b5728c9bd 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/01_pem_architecture.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/01_pem_architecture.mdx @@ -4,7 +4,6 @@ title: "PEM Architecture" - Postgres Enterprise Manager (PEM) is a tool designed to monitor and manage multiple Postgres servers through a single GUI interface. PEM is capable of monitoring the following areas of the infrastructure: Note: The term Postgres refers to either PostgreSQL or EDB Postgres Advanced Server. @@ -18,7 +17,7 @@ PEM consists of a number of individual software components; the individual compo - **PEM Server** - The PEM Server is used as the data repository for monitoring data and as a server to which both Agents and Clients connect. The PEM server consists of an instance of PostgreSQL and an associated database for storage of monitoring data, and a server that provides web services. - **PEM Agent** - The PEM Agent is responsible for executing tasks and reporting statistics from the Agent host and monitored Postgres instances to the PEM server. A single PEM Agent can monitor multiple installed instances of Postgres that reside on one or many hosts. 
- **PEM Web Client** - The PEM web interface allows you to manage and monitor Postgres servers and utilize PEM extended functionality. The web interface software is installed with the PEM server and is accessed via any supported web browser. -- **SQL Profiler** - SQL Profiler is a Postgres server plugin to record the monitoring data and query plans to be analysed by the SQL Profiler tool in PEM. This is an optional component of PEM, but the plugin must be installed into each instance of Postgres with which you wish to use the SQL Profiler tool. The SQL Profiler may be used with any supported version of an EnterpriseDB distribution of a PostgreSQL server or Advanced Server (not just those managed through the PEM server). See the [PEM SQL Profiler Configuration Guide](https://www.enterprisedb.com/edb-docs/p/edb-postgres-enterprise-manager) for details and supported versions. +- **SQL Profiler** - SQL Profiler is a Postgres server plugin to record the monitoring data and query plans to be analysed by the SQL Profiler tool in PEM. This is an optional component of PEM, but the plugin must be installed into each instance of Postgres with which you wish to use the SQL Profiler tool. The SQL Profiler may be used with any supported version of an EnterpriseDB distribution of a PostgreSQL server or Advanced Server (not just those managed through the PEM server). See the [PEM SQL Profiler Configuration Guide](/pem/latest/) for details and supported versions. **PEM architecture** @@ -75,7 +74,7 @@ Once configured, each agent collects statistics and other information on the hos - Table access statistics - Table and index sizes -A list of PEM probes can be found [here](../04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/#pem_probes). +A list of PEM probes can be found [here](../04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/01_pem_probes/#pem_probes). 
By default, the PEM Agent bound to the database server collects the OS/Database monitoring statistics and also runs any scheduled tasks/jobs for that particular database server, storing data in the pem database on the PEM server. @@ -97,4 +96,4 @@ The plugin is installed with the EDB Postgres Advanced Server distribution but m SQL Profiler may be used on servers that are not managed through PEM, but to perform scheduled traces, a server must have the plugin installed, and must be managed by an installed and configured PEM agent. -For more information about using SQL Profiler, see the [PEM SQL Profiler Configuration Guide](https://www.enterprisedb.com/edb-docs/p/edb-postgres-enterprise-manager) +For more information about using SQL Profiler, see the [PEM SQL Profiler Configuration Guide](/pem/latest/) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/02_pem_server_logon.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/02_pem_server_logon.mdx index 2ec3b0c35aa..1da73953e33 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/02_pem_server_logon.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/02_pem_server_logon.mdx @@ -4,10 +4,9 @@ title: "PEM Server Logon" - The PEM web interface uses Apache to connect to the PEM server on port 8080 of the IP address on which the PEM server is installed. To connect to PEM, open your browser of choice, and navigate to: -> *<ip\_address\_of\_PEM\_host>:8080/pem* +> *<ip_address_of_PEM_host>:8080/pem* Where `ip_address_of_PEM_host` specifies the IP address of the host of the PEM server. @@ -20,9 +19,9 @@ Use the fields on the Login window to authenticate yourself with the PEM server: We strongly recommend you create an individual role for each user. You can create a login role with the `CREATE ROLE` SQL statement, or by defining a role with the PEM client `Create - Login/Group Role` dialog. 
To access the dialog, connect to the PEM server database; right-click the `Login/Group Roles` node in the tree control, and select `New Login Role...` from the `Create` pull-aside menu. Roles must be granted permissions and role memberships to properly use PEM: - users that are members of the `pem_user` role are essentially `read-only` users; they may view dashboards, change the database server connection options, but they will not be able to install agents or configure the server directory, alerts, probes, or run any of the wizard/dialog based components of PEM. - - users that are members of the `pem_admin` role have the same read permissions as members of the pem\_user role, plus sufficient privileges to configure the servers, directory, alerts and probes. - - `administrative` users must be added to the pem\_admin role and explicitly granted the create role privilege. in addition to the permissions granted through membership in the pem\_admin role, the create role privilege allows an administrator to create additional pem users, and to install and register new agents. - - users can be member of one of the [PEM roles](../01_toc_pem_getting_started/#pem_roles) to give right to run a particular component, to manage, or to configure PEM. + - users that are members of the `pem_admin` role have the same read permissions as members of the pem_user role, plus sufficient privileges to configure the servers, directory, alerts and probes. + - `administrative` users must be added to the pem_admin role and explicitly granted the create role privilege. in addition to the permissions granted through membership in the pem_admin role, the create role privilege allows an administrator to create additional pem users, and to install and register new agents. + - users can be member of one of the [PEM roles](04_pem_roles/#pem_roles) to give right to run a particular component, to manage, or to configure PEM. - Provide the password associated with the user in the `Password` field. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/03_pem_managing_configuration_settings.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/03_pem_managing_configuration_settings.mdx index 0cb08ae21ba..0b342264ca3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/03_pem_managing_configuration_settings.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/03_pem_managing_configuration_settings.mdx @@ -4,7 +4,6 @@ title: "Managing Configuration Settings" - There are multiple configuration files that are read at startup by Postgres Enterprise Manager. These are as follows: - `config.py`: This is the main configuration file, and should not be modified. It can be used as a reference for configuration settings, that may be overridden in one of the following files. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/04_pem_roles.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/04_pem_roles.mdx index 2a5c6f37562..421fb02b850 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/04_pem_roles.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/04_pem_roles.mdx @@ -4,12 +4,11 @@ title: "Roles for managing PEM" - You can use the `Login/Group Role` dialog to allow a role with limited privileges to access PEM features such as the Audit Manager, Capacity Manager, or SQL Profiler. PEM pre-defined roles allow access to PEM functionality; roles that are assigned membership in these roles can access the associated feature. ![Role dialog membership tab](../images/role_dialog_membership.png) -When defining a user, use the `Membership` tab to specify the roles in which the new user is a member. The new user will share the privileges associated with each role in which it is a member. 
For a user to have access to PEM extended functionality, the role must be a member of the pem\_user role and the pre-defined role that grants access to the feature. Use the `Roles` field to select pre-defined role names from a drop down list. +When defining a user, use the `Membership` tab to specify the roles in which the new user is a member. The new user will share the privileges associated with each role in which it is a member. For a user to have access to PEM extended functionality, the role must be a member of the pem_user role and the pre-defined role that grants access to the feature. Use the `Roles` field to select pre-defined role names from a drop down list. Check the checkbox to the right of the role name to allow administrative access to the functionality. @@ -21,31 +20,31 @@ The examples shown above creates a login role named `acctg_clerk` that will have You can use PEM pre-defined roles to allow access to the functionality listed in the table below: -| Value | Parent Role | Description | -|-------------------------------------|---------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| pem\_super\_admin | | Role for administration/management/configuration of all the objects within Postgres Enterprise Manager console. | -| pem\_admin | pem\_super\_admin | Role for administration/management/configuration of all the agents, servers, or monitored objects that are visible to a user having pem\_admin role. A user with pem\_admin role can view and manage only those objects where this role has been mentioned in the Team field under the server's properties. 
| -| pem\_user | | Role for having read-only access to all the agents, servers, or monitored objects that are visible to a user having pem\_user role. A user with pem\_user role can view only those objects where this role has been mentioned in the Team field under the server's properties. | -| pem\_config | pem\_admin | Role for configuration management of Postgres Enterprise Manager. | -| pem\_component | pem\_admin | Role to run/execute all wizard/dialog based components. | -| pem\_rest\_api | pem\_admin | Role to access the REST API. | -| pem\_server\_service\_manager | pem\_admin | Role for allowing to restart/reload the monitored database server (if server-id provided). | -| pem\_manage\_schedule\_task | pem\_admin | Role to configure the schedule tasks. | -| pem\_manage\_alert | pem\_admin | Role for managing/configuring alerts, and its templates. | -| pem\_config\_alert | pem\_config, pem\_manage\_alert | Role for configuring the alerts on any monitored objects. | -| pem\_manage\_probe | pem\_admin | Role to create, update, delete the custom probes, and change custom probe configuration. | -| pem\_config\_probe | pem\_config, pem\_manage\_probe | Role for probe configuration (history retention, execution frequency, enable/disble the probe) on all visible monitored objects. | -| pem\_database\_server\_registration | pem\_admin | Role to register a database server. | -| pem\_comp\_postgres\_expert | pem\_component | Role to run the Postgres Expert. | -| pem\_comp\_auto\_discovery | pem\_component | Role to run the Auto discovery of a database server dialog. | -| pem\_comp\_log\_analysis\_expert | pem\_component | Role to run the Log Analysis Expert. | -| pem\_comp\_sqlprofiler | pem\_component | Role to run the SQL Profiler. | -| pem\_manage\_efm | pem\_admin | Role to manage Failover Manager functionalities. | -| pem\_comp\_capacity\_manager | pem\_component | Role to run the Capacity Manager. 
| -| pem\_comp\_log\_manager | pem\_component | Role to run the Log Manager. | -| pem\_comp\_audit\_manager | pem\_component | Role to run the Audit Manager. | -| pem\_comp\_tuning\_wizard | pem\_component | Role to run the Tuning Wizard. | -| pem\_comp\_bart | pem\_component | Role to configure and manage BART server. | +| Value | Parent Role | Description | +| -------------------------------- | ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| pem_super_admin | | Role for administration/management/configuration of all the objects within Postgres Enterprise Manager console. | +| pem_admin | pem_super_admin | Role for administration/management/configuration of all the agents, servers, or monitored objects that are visible to a user having pem_admin role. A user with pem_admin role can view and manage only those objects where this role has been mentioned in the Team field under the server's properties. | +| pem_user | | Role for having read-only access to all the agents, servers, or monitored objects that are visible to a user having pem_user role. A user with pem_user role can view only those objects where this role has been mentioned in the Team field under the server's properties. | +| pem_config | pem_admin | Role for configuration management of Postgres Enterprise Manager. | +| pem_component | pem_admin | Role to run/execute all wizard/dialog based components. | +| pem_rest_api | pem_admin | Role to access the REST API. | +| pem_server_service_manager | pem_admin | Role for allowing to restart/reload the monitored database server (if server-id provided). | +| pem_manage_schedule_task | pem_admin | Role to configure the schedule tasks. 
| +| pem_manage_alert                 | pem_admin                    | Role for managing/configuring alerts, and its templates.                                                                                                                                                                                                                  | +| pem_config_alert                 | pem_config, pem_manage_alert | Role for configuring the alerts on any monitored objects.                                                                                                                                                                                                                 | +| pem_manage_probe                 | pem_admin                    | Role to create, update, delete the custom probes, and change custom probe configuration.                                                                                                                                                                                  | +| pem_config_probe                 | pem_config, pem_manage_probe | Role for probe configuration (history retention, execution frequency, enable/disable the probe) on all visible monitored objects.                                                                                                                                         | +| pem_database_server_registration | pem_admin                    | Role to register a database server.                                                                                                                                                                                                                                       | +| pem_comp_postgres_expert         | pem_component                | Role to run the Postgres Expert.                                                                                                                                                                                                                                          | +| pem_comp_auto_discovery          | pem_component                | Role to run the Auto discovery of a database server dialog.                                                                                                                                                                                                               | +| pem_comp_log_analysis_expert     | pem_component                | Role to run the Log Analysis Expert.                                                                                                                                                                                                                                      | +| pem_comp_sqlprofiler             | pem_component                | Role to run the SQL Profiler.                                                                                                                                                                                                                                             | +| pem_manage_efm                   | pem_admin                    | Role to manage Failover Manager functionalities.                                                                                                                                                                                                                          | +| pem_comp_capacity_manager        | pem_component                | Role to run the Capacity Manager.                                                                                                                                                                                                                                         | +| pem_comp_log_manager             | pem_component                | Role to run the Log Manager.                                                                                                                                                                                                                                              | +| pem_comp_audit_manager           | pem_component                | Role to run the Audit Manager.                                                                                                                                                                                                                                            | +| pem_comp_tuning_wizard           | pem_component                | Role to run the Tuning Wizard.                                                                                                                                                                                                                                            | +| pem_comp_bart                    | pem_component                | Role to configure and manage BART server.                                                                                                                                                                                                                                 |
@@ -55,6 +54,6 @@ Note
-The difference between pem\_admin role and pem\_super\_admin role is that a user with pem\_admin role can view and manage only those objects where the role has been mentioned in the Team field under the server's properties, while a user with pem\_super\_admin role can view and manage all the objects within Postgres Enterprise Manager console. +The difference between pem_admin role and pem_super_admin role is that a user with pem_admin role can view and manage only those objects where the role has been mentioned in the Team field under the server's properties, while a user with pem_super_admin role can view and manage all the objects within Postgres Enterprise Manager console. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/05_group_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/05_group_dialog.mdx index 43ba5a3bc88..ec785d49c8b 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/05_group_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/05_group_dialog.mdx @@ -4,7 +4,6 @@ title: "The Group Dialog" - Use the `Group` dialog to add a new group to the PEM client tree control. You can use a group to simplify management of related servers or agents. 
![Create Group dialog](../images/create_group.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/06_auto_discovery_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/06_auto_discovery_dialog.mdx index cd1925544e4..fad595cb115 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/06_auto_discovery_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/06_auto_discovery_dialog.mdx @@ -4,7 +4,6 @@ title: "Automatic Discovery of Servers" - Use the `Auto Discovery` dialog to instruct a PEM agent to locate database servers that reside on a monitored system, and add a binding that allows the agent to monitor the selected server. To enable auto discovery for a specific agent, you must enable the `Server Auto Discovery` probe. To access the `Manage Probes` tab, highlight the name of a PEM agent in the PEM client tree control, and select `Manage Probes...` from the `Management` menu. When the Manage Probes tab opens, confirm that the slider control in the `Enabled?` column is set to `Yes`. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/07_pem_define_connection.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/07_pem_define_connection.mdx index 419abef0210..ac098e6ac67 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/07_pem_define_connection.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/07_pem_define_connection.mdx @@ -4,7 +4,6 @@ title: "Defining a server" - Use the `Create - Server` dialog to describe a new server connection, bind the server to a PEM agent, and display the server to the PEM tree control. 
![Create Server dialog - General tab](../images/create_server_general_tab.png) @@ -12,7 +11,7 @@ Use the `Create - Server` dialog to describe a new server connection, bind the s Use the fields on the `General` tab to describe the general properties of the server: - Use the `Name` field to specify a user-friendly name for the server. The name specified will identify the server in the PEM client tree control. -- You can use [groups](../01_toc_pem_getting_started/#group_dialog) to organize your servers and agents in the PEM client tree control. Using groups can help you manage large numbers of servers more easily. For example, you may want to have a production group, a test group, or LAN specific groups. Use the `Group` drop-down listbox to select the group in which the new server will be displayed. +- You can use [groups](05_group_dialog/#group_dialog) to organize your servers and agents in the PEM client tree control. Using groups can help you manage large numbers of servers more easily. For example, you may want to have a production group, a test group, or LAN specific groups. Use the `Group` drop-down listbox to select the group in which the new server will be displayed. - Use the `Team` field to specify a PostgreSQL role name. Only PEM users who are members of this role, who created the server initially, or have superuser privileges on the PEM server will see this server when they logon to PEM. If this field is left blank, all PEM users will see the server. - Use the `Background` color selector to select the color that will be displayed in the PEM tree control behind database objects that are stored on the server. - Use the `Foreground` color selector to select the font color of labels in the PEM tree control for objects stored on the server. @@ -66,13 +65,13 @@ Use the fields on the `SSH Tunnel` tab to configure SSH Tunneling. 
You can use a Use fields on the `Advanced` tab to specify details that are used to manage the server: - Specify the IP address of the server host in the `Host Address` field. -- Use the `DB restriction` field to specify a SQL restriction that will be used against the [pg\_database](http://www.postgresql.org/docs/current/interactive/catalog-pg-database.html) table to limit the databases displayed in the tree control. For example, you might enter: `'live_db', 'test_db'` to instruct the PEM browser to display only the live\_db and test\_db databases. +- Use the `DB restriction` field to specify a SQL restriction that will be used against the [pg_database](http://www.postgresql.org/docs/current/interactive/catalog-pg-database.html) table to limit the databases displayed in the tree control. For example, you might enter: `'live_db', 'test_db'` to instruct the PEM browser to display only the live_db and test_db databases. - Use the `Password file` field to specify the location of a password file (.pgpass). The .pgpass file allows a user to login without providing a password when they connect, and it must be present on the PEM server. For more information, see [Section 33.15 of the Postgres documentation](http://www.postgresql.org/docs/current/static/libpq-pgpass.html). Please note: Use of a password file is only supported when PEM is using libpq v10.0 or later to connect to the server. - Use the `Service ID` field to specify parameters to control the database service process. For servers that are stored in the Enterprise Manager directory, enter the service ID. On Windows machines, this is the identifier for the Windows service. On \*nix machines, this is the name of the init script used to start the server in /etc/init.d. An example of an ID on all platforms is `postgresql-9.0`. For local servers, the setting is operating system dependent: - If the PEM client is running on a Windows machine, it can control the postmaster service if you have enough access rights. 
Enter the name of the service. In case of a remote server, it must be prepended by the machine name (e.g. PSE1\\pgsql-8.0). PEM will automatically discover services running on your local machine. - - If the PEM client is running on a Unix machine, it can control processes running on the local machine if you have enough access rights. Enter a full path and needed options to access the pg\_ctl program. When executing service control functions, PEM will append status/start/stop keywords to this. For example: `sudo /usr/local/pgsql/bin/pg_ctl -D /data/pgsql` -- If the server is a member of a [Failover Manager](../04_toc_pem_features/#monitoring_a_failover_manager_cluster) cluster, you can use PEM to monitor the health of the cluster and to replace the primary node if necessary. To enable PEM to monitor Failover Manager, use the `EFM cluster name` field to specify the cluster name. The cluster name is the prefix of the name of the Failover Manager cluster properties file. For example, if the cluster properties file is named `efm.properties`, the cluster name is `efm`. -- If you are using PEM to monitor the status of a [Failover Manager](../04_toc_pem_features/#monitoring_a_failover_manager_cluster) cluster, use the `EFM installation path` field to specify the location of the Failover Manager binary file. By default, the Failover Manager binary file is installed in `/usr/efm-2.x/bin`, where `x` specifies the Failover Manager version. + - If the PEM client is running on a Unix machine, it can control processes running on the local machine if you have enough access rights. Enter a full path and needed options to access the pg_ctl program. When executing service control functions, PEM will append status/start/stop keywords to this. 
For example: `sudo /usr/local/pgsql/bin/pg_ctl -D /data/pgsql` +- If the server is a member of a [Failover Manager](../04_toc_pem_features/19_monitoring_a_failover_manager_cluster/#monitoring_a_failover_manager_cluster) cluster, you can use PEM to monitor the health of the cluster and to replace the primary node if necessary. To enable PEM to monitor Failover Manager, use the `EFM cluster name` field to specify the cluster name. The cluster name is the prefix of the name of the Failover Manager cluster properties file. For example, if the cluster properties file is named `efm.properties`, the cluster name is `efm`. +- If you are using PEM to monitor the status of a [Failover Manager](../04_toc_pem_features/19_monitoring_a_failover_manager_cluster/#monitoring_a_failover_manager_cluster) cluster, use the `EFM installation path` field to specify the location of the Failover Manager binary file. By default, the Failover Manager binary file is installed in `/usr/efm-2.x/bin`, where `x` specifies the Failover Manager version. ![Create Server dialog - PEM Agent tab](../images/create_server_pem_agent_tab.png) @@ -85,7 +84,7 @@ Use fields on the `PEM Agent` tab to specify connection details for the PEM agen - Use the drop-down listbox in the `SSL` field to specify an SSL operational mode; select from require, prefer, allow, disable, verify-ca or verify-full. | Mode | Description: | -|-------------|-------------------------------------------------------------------------------------------------------------------------------------------| +| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------- | | require | To require SSL encryption for transactions between the server and the agent. | | prefer | To use SSL encryption between the server and the agent if SSL encryption is available. | | allow | To allow the connection to use SSL if required by the server. 
| @@ -98,8 +97,8 @@ Use fields on the `PEM Agent` tab to specify connection details for the PEM agen - Use the `Database` field to specify the name of the Postgres Plus database to which the agent will initially connect. - Specify the name of the user that agent should use when connecting to the server in the `User name` field. Note that if the specified user is not a database superuser, then some of the features will not work as expected. If you are using Postgres version 10 or above, you can use the `pg_monitor` role to grant the required privileges to a non-superuser. For information about `pg_monitor` role, see [Default Roles](https://www.postgresql.org/docs/current/default-roles.html). - Specify the password that the agent should use when connecting to the server in the `Password` field, and verify it by typing it again in the `Confirm password` field. If you do not specify a password, you will need to configure the authentication for the agent manually; you can use a `.pgpass` file for example. -- Specify `Yes` in the `Allow takeover?` field to specify that another agent may be signaled (for example, by a fencing script) to monitor the server. This feature allows an agent to take responsibility for the monitoring of the database server if, for example, the server is part of a [high availability](../02_toc_pem_agent/02_pem_agent_binding/#pem_agent_ha) failover process. +- Specify `Yes` in the `Allow takeover?` field to specify that another agent may be signaled (for example, by a fencing script) to monitor the server. This feature allows an agent to take responsibility for the monitoring of the database server if, for example, the server is part of a [high availability](../02_toc_pem_agent/02_pem_agent_binding/02_pem_agent_ha/#pem_agent_ha) failover process. -If you experience connection problems, please visit the [connection problems](../01_toc_pem_getting_started/#connect_error) page. 
+If you experience connection problems, please visit the [connection problems](11_connect_error/#connect_error) page. To view the properties of a server, right-click on the server name in the PEM client tree control, and select the `Properties...` option from the context menu. To modify a server's properties, disconnect from the server before opening the `Properties` dialog. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/08_pem_define_aws_instance_connection.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/08_pem_define_aws_instance_connection.mdx index fd23ed656a5..708cbcc37b5 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/08_pem_define_aws_instance_connection.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/08_pem_define_aws_instance_connection.mdx @@ -2,8 +2,6 @@ title: "Defining and Monitoring Postgres instances on AWS" --- - - There are two scenarios in which you can monitor a Postgres instance on an AWS host with PEM: > - Postgres Instance running on AWS EC2 @@ -29,9 +27,9 @@ In the first two scenarios, you must configure the VPN on AWS EC2 , so the AWS E -The PEM Agent running on AWS EC2 or on your local machine should be registered to the PEM Server. Please note that when registering the PEM Agent with the PEM Server you should use the hostname of AWS EC2 instance. For more details on registering the PEM Agent see, [PEM Self Registration](../02_toc_pem_agent/#pem_agent_self_registration). +The PEM Agent running on AWS EC2 or on your local machine should be registered to the PEM Server. Please note that when registering the PEM Agent with the PEM Server you should use the hostname of AWS EC2 instance. For more details on registering the PEM Agent see, [PEM Self Registration](../02_toc_pem_agent/07_pem_agent_self_registration/#pem_agent_self_registration). 
-You can register the Postgres instance running on AWS EC2 on PEM Server using the `Create - Server` dialog. For more details on registering the server using `Create - Server` dialog see, [Define a Server](../01_toc_pem_getting_started/#pem_define_connection). Use the `PEM Agent` tab on the `Create - Server` dialog to bind the registered PEM Agent with the Postgres instance. +You can register the Postgres instance running on AWS EC2 on PEM Server using the `Create - Server` dialog. For more details on registering the server using `Create - Server` dialog see, [Define a Server](07_pem_define_connection/#pem_define_connection). Use the `PEM Agent` tab on the `Create - Server` dialog to bind the registered PEM Agent with the Postgres instance. When the PEM Agent is registered to the PEM Server and your Postgres instance that is running on AWS EC2 is registered to the PEM Server, you can monitor your instance with PEM. @@ -39,7 +37,7 @@ When the PEM Agent is registered to the PEM Server and your Postgres instance th While creating an AWS RDS database, choose `PostgreSQL` when prompted for `Engine options`. After creating a `Postgres(RDS)` instance on AWS, use `Create - Server` dialog to add the `Postgres(RDS)` instance to the PEM Server. Using this dialog you can describe a new server connection, bind the server to a PEM Agent, and display the server to the PEM browser tree control. -For detailed information on the `Create - Server` dialog and configuration details for each tab, see [Define a Server](../01_toc_pem_getting_started/#pem_define_connection). +For detailed information on the `Create - Server` dialog and configuration details for each tab, see [Define a Server](07_pem_define_connection/#pem_define_connection). The `PEM Agent` tab in the `Create - Server` dialog must have the `Remote Monitoring` field set to `Yes` to monitor the `Postgres(RDS)` instance on AWS instance using PEM Server. 
@@ -47,19 +45,19 @@ The `PEM Agent` tab in the `Create - Server` dialog must have the `Remote Monito As the PEM Agent will be monitoring the Postgres(RDS) AWS instance remotely, the functionality will be limited as described below: -| **Feature Name** | **Works with remote PEM Agent** | **Comments** | -|------------------------------|---------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Audit Manager | No | | -| Capacity Manager | Limited | There will be no correlation between the database server and operating system metrices. | -| Log Manager | No | | -| Manage Alerts | Limited | When you run an alert script on the database server, it will run on the machine where the bound PEM Agent is running, and not on the actual database server machine. | -| Manage Charts | Yes | | -| Manage Dashboards | Limited | Some dashboards may not be able to show complete data. For example, the operating system information of the database server will not be displayed as it is not available. | -| Manage Probes | Limited | Some of the PEM probes will not return information, and some of the functionalities may be affected. For details about probe functionality, see the [PEM Agent Guide](https://enterprisedb.com/edb-docs). | -| Postgres Expert | Limited | The Postgres Expert will provide partial information as operating system information is not available. | -| Postgres Log Analysis Expert | No | The Postgres Log Analysis Expert will not be able to perform an analysis as it is dependent on the logs imported by log manager, which will not work as required. | -| Scheduled Tasks | Limited | Scheduled tasks will work only for database server; scripts will run on a remote Agent. 
| -| Tuning Wizard | No | | -| System Reports | Yes | | -| Core Usage Reports | Limited | The Core Usage report will not show complete information. For example, the platform, number of cores, and total RAM will not be displayed. | -| Managing BART | No | BART requires password less authentication between two machines, where database server and BART are installed. An AWS RDS instance doesn't allow to use host access. | +| **Feature Name** | **Works with remote PEM Agent** | **Comments** | +| ---------------------------- | ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Audit Manager | No | | +| Capacity Manager | Limited | There will be no correlation between the database server and operating system metrices. | +| Log Manager | No | | +| Manage Alerts | Limited | When you run an alert script on the database server, it will run on the machine where the bound PEM Agent is running, and not on the actual database server machine. | +| Manage Charts | Yes | | +| Manage Dashboards | Limited | Some dashboards may not be able to show complete data. For example, the operating system information of the database server will not be displayed as it is not available. | +| Manage Probes | Limited | Some of the PEM probes will not return information, and some of the functionalities may be affected. For details about probe functionality, see the [PEM Agent Guide](/pem/latest/pem_agent/). | +| Postgres Expert | Limited | The Postgres Expert will provide partial information as operating system information is not available. | +| Postgres Log Analysis Expert | No | The Postgres Log Analysis Expert will not be able to perform an analysis as it is dependent on the logs imported by log manager, which will not work as required. 
| +| Scheduled Tasks | Limited | Scheduled tasks will work only for database server; scripts will run on a remote Agent. | +| Tuning Wizard | No | | +| System Reports | Yes | | +| Core Usage Reports | Limited | The Core Usage report will not show complete information. For example, the platform, number of cores, and total RAM will not be displayed. | +| Managing BART | No | BART requires passwordless authentication between two machines, where database server and BART are installed. An AWS RDS instance doesn't allow host access. | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/09_pem_connect_to_server.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/09_pem_connect_to_server.mdx index 613972609f2..0f4b0567bcc 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/09_pem_connect_to_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/09_pem_connect_to_server.mdx @@ -4,7 +4,6 @@ title: "Connect to server" - After defining a server connection, use the `Connect to Server` dialog to authenticate with a server and access the objects stored on the server. To access the dialog, right click on the server name in the PEM client tree control, and select `Connect Server` from the context menu. ![PEM Connect to Server dialog](../images/connect_to_server.png) @@ -16,7 +15,7 @@ If prompted, provide authentication information for the selected server: The browser displays a message in a green status bar in the lower right corner when the server connects successfully. -If you receive an error message while attempting a connection, verify that your network is allowing PEM and the host of the database server to communicate. For detailed information about a specific error message, please see the [Connection Error](../01_toc_pem_getting_started/#connect_error) help page. 
+If you receive an error message while attempting a connection, verify that your network is allowing PEM and the host of the database server to communicate. For detailed information about a specific error message, please see the [Connection Error](11_connect_error/#connect_error) help page. To review or modify connection details, right-click on the name of the server, and select `Properties...` from the context menu. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/10_control_server.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/10_control_server.mdx index bb6c9599333..5fa54ee4272 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/10_control_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/10_control_server.mdx @@ -4,8 +4,7 @@ title: "Controlling a Server" - -If you provided a `Service ID` on the `Advanced` tab of the [Server](../01_toc_pem_getting_started/#pem_define_connection) property dialogue, the PEM server can control the database service process. +If you provided a `Service ID` on the `Advanced` tab of the [Server](07_pem_define_connection/#pem_define_connection) property dialogue, the PEM server can control the database service process. > - If the PEM client is running on a Windows machine, it can control the postmaster service if you have sufficient access rights. In case of a remote server, the service name must be prepended by the machine name (e.g. PSE1pgsql-8.0). > - If the PEM client is running on a Unix machine, it can control processes running on the local machine if you have sufficient access rights. When executing service control functions, PEM will append status/start/stop keywords to the service name provided. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/11_connect_error.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/11_connect_error.mdx index a07e874eb92..0708b940c6a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/11_connect_error.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/11_connect_error.mdx @@ -4,7 +4,6 @@ title: "Connection error" - When connecting to a PostgreSQL server, you may get an error message. If you encounter an error message, please review the message carefully; each error message attempts to incorporate the information you'll need to resolve the problem. For more details about specific errors, please locate the error message in the list below: **Connection to the server has been lost** @@ -18,20 +17,21 @@ This error message indicates that the connection attempt has taken longer than t ![Could not connect to server](../images/ce_not_running.png) If pgAdmin displays this message, there are two possible reasons for this: + - the database server isn't running - simply start it. - the server isn't configured to accept TCP/IP requests on the address shown. -For security reasons, a PostgreSQL server "out of the box" doesn't listen on TCP/IP ports. Instead, it must be enabled to listen for TCP/IP requests. This can be done by adding **tcpip = true** to the postgresql.conf file for Versions 7.3.x and 7.4.x, or **listen\_addresses='\*'** for Version 8.0.x and above; this will make the server accept connections on any IP interface. +For security reasons, a PostgreSQL server "out of the box" doesn't listen on TCP/IP ports. Instead, it must be enabled to listen for TCP/IP requests. This can be done by adding **tcpip = true** to the postgresql.conf file for Versions 7.3.x and 7.4.x, or **listen_addresses='\*'** for Version 8.0.x and above; this will make the server accept connections on any IP interface. 
For further information, please refer to the PostgreSQL documentation about [runtime configuration](http://www.postgresql.org/docs/current/interactive/runtime-config.html). -**FATAL: no pg\_hba.conf entry** +**FATAL: no pg_hba.conf entry** ![No pg_hba.conf entry](../images/ce_error_hba.png) If pgAdmin displays this message when connecting, your server can be contacted correctly over the network, but is not configured to accept your connection. Your client has not been detected as a legal user for the database. -To connect to a server, the pg\_hba.conf file on the database server must be configured to accept connections from the host of the pgAdmin client. Modify the pg\_hba.conf file on the database server host, and add an entry in the form: +To connect to a server, the pg_hba.conf file on the database server must be configured to accept connections from the host of the pgAdmin client. Modify the pg_hba.conf file on the database server host, and add an entry in the form: > - **host template1 postgres 192.168.0.0/24 md5** for an IPV4 network > - **host template1 postgres ::ffff:192.168.0.0/120 md5** for an IPV6 network diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/index.mdx index 66b888c90fe..c596e78a3c0 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/01_toc_pem_getting_started/index.mdx @@ -4,14 +4,13 @@ title: "PEM Getting Started" - -You can use either a graphical installer or an RPM package to install the PEM server and PEM agent; for detailed installation instructions, please see the Postgres Enterprise Manager Installation Guide, available at www.enterprisedb.com. 
+You can use either a graphical installer or an RPM package to install the PEM server and PEM agent; for detailed installation instructions, please see the Postgres Enterprise Manager Installation Guide, available at [www.enterprisedb.com](http://www.enterprisedb.com). Contents:
-pem\_architecture pem\_server\_logon pem\_managing\_configuration\_settings pem\_roles +pem_architecture pem_server_logon pem_managing_configuration_settings pem_roles
@@ -23,6 +22,6 @@ Contents:
-group\_dialog auto\_discovery\_dialog pem\_define\_connection pem\_define\_aws\_instance\_connection pem\_connect\_to\_server control\_server connect\_error +group_dialog auto_discovery_dialog pem_define_connection pem_define_aws_instance_connection pem_connect_to_server control_server connect_error
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/01_pem_agent_properties.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/01_pem_agent_properties.mdx index ea508368ec1..cfbb3e42721 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/01_pem_agent_properties.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/01_pem_agent_properties.mdx @@ -4,13 +4,12 @@ title: "PEM Agent Properties" - The `PEM Agent Properties` dialog provides information about the PEM agent from which the dialog was opened; to open the dialog, right-click on an agent name in the PEM client tree control, and select `Properties` from the context menu. ![PEM Agent Properties dialog - General tab](../images/pem_agent_properties.png) - The `Description` field displays a modifiable description of the PEM agent. This description is displayed in the tree control of the PEM client. -- You can use [groups](../01_toc_pem_getting_started/#group_dialog) to organize your servers and agents in the PEM client tree control. Use the `Group` drop-down listbox to select the group in which the agent will be displayed. +- You can use [groups](../01_toc_pem_getting_started/05_group_dialog/#group_dialog) to organize your servers and agents in the PEM client tree control. Use the `Group` drop-down listbox to select the group in which the agent will be displayed. - Use the `Team` field to specify the name of the group role that should be able to access servers monitored by the agent; the servers monitored by this agent will be displayed in the PEM client tree control to connected team members. Please note that this is a convenience feature. The `Team` field does not provide true isolation, and should not be used for security purposes. - The `Heartbeat interval` fields displays the length of time that will elapse between reports from the PEM agent to the PEM server. 
Use the selectors next to the `Minutes` or `Seconds` fields to modify the interval. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/01_pem_agent_config_params.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/01_pem_agent_config_params.mdx index c16f844165d..b405bb5acba 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/01_pem_agent_config_params.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/01_pem_agent_config_params.mdx @@ -4,48 +4,47 @@ title: "PEM Agent Configuration Parameters" +A number of user-configurable parameters and registry entries control the behavior of the PEM Agent. With the exception of the PEM_MAXCONN (or pem_maxconn) parameter, we strongly recommend against modifying any of the configuration parameters or registry entries listed below without first consulting EnterpriseDB support experts. -A number of user-configurable parameters and registry entries control the behavior of the PEM Agent. With the exception of the PEM\_MAXCONN (or pem\_maxconn) parameter, we strongly recommend against modifying any of the configuration parameters or registry entries listed below without first consulting EnterpriseDB support experts. 
- -> - On 32 bit Windows systems, PEM registry entries are located in HKEY\_LOCAL\_MACHINE\\Software\\EnterpriseDB\\PEM\\agent -> - On 64 bit Windows systems, PEM registry entries are located in HKEY\_LOCAL\_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent +> - On 32 bit Windows systems, PEM registry entries are located in HKEY_LOCAL_MACHINE\\Software\\EnterpriseDB\\PEM\\agent +> - On 64 bit Windows systems, PEM registry entries are located in HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent > - On Linux systems, PEM configuration options are stored in the agent.cfg file, located (by default) in /usr/edb/pem/agent/etc -| **Parameter Name** | **Description** | **Value (if applicable)** | -|----------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PEM\_HOST (on Windows) or pem\_host (on Linux) | The IP address or hostname of the PEM server. | By default, set to 127.0.0.1. | -| PEM\_PORT (on Windows) or pem\_port (on Linux) | The database server port to which the agent connects to communicate with the PEM server. | By default, the PEM server monitors port 5432. | -| AgentID (on Windows) or agent\_id (on Linux) | A unique identifier assigned to the PEM agent. | The first agent is assigned an identifier of '1', the second agent is assigned an identifier of '2', and so on. 
| -| AgentUser (on Windows) or agent\_user (on Linux) | User to connect the PEM database server | If present, and not set to empty string, it will be used to connect the PEM database server. | -| AgentCrtPath (on Windows) or agent\_ssl\_crt (on Linux) | The complete path to the PEM agent's certificate file. | By default, on Windows, C:\\Users\\user\_name\\AppData\\Roaming\\pem/agent.crt. By default on Linux, /root/.pem/agent.crt. | -| AgentKeyPath (on Windows) or agent\_ssl\_key (on Linux) | The complete path to the PEM agent's key file. | By default, on Windows, C:\\Users\\user\_name\\AppData\\Roaming\\pem/agent.key. By default on Linux, /root/.pem/agent.key. | -| AgentFlagDir (on Windows) or agent\_flag\_dir (on Linux) | Used for HA support. Specifies the directory path checked for requests to take over monitoring another server. Requests are made in the form of a file in the specified flag directory. | Not set by default. This option allows you to override the hard-coded default. | -| LogLevel (on Windows) or log\_level (on Linux) | Log level specifies the type of event that will be written to the PEM log files. | Log level may be set to: error, debug1, debug2, or warning By default, log level is set to `warning` | -| log\_location (on Linux only) | Specifies the location of the PEM worker log file. | On Linux, /var/log/pem/worker.log. On Windows, Logs & errors will be reported in the Application event log. | -| agent\_log\_location (on Linux only) | Specifies the location of the PEM agent log file. | On Linux, /var/log/pem/agent.log. On Windows, Logs & errors will be reported in the Application event log. | -| ShortWait (on Windows) or short\_wait (on Linux) | The minimum length of time (in seconds) that the PEM agent will wait before checking which probes are next in the queue (waiting to run). | By default, 10 seconds. 
| -| LongWait (on Windows) or long\_wait (on Linux) | The maximum length of time (in seconds) that the PEM agent will wait before attempting to connect to the PEM server if an initial connection attempt fails. | By default, 30 seconds. | -| AlertThreads (on Windows) or alert\_threads (on Linux) | The number of alert threads to be spawned by the agent. | Set to 1 for the agent that resides on the host of the PEM server; should be set to 0 for all other agents. | -| EnableSMTP (on Windows) or enable\_smtp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate emails. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate emails. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| EnableSNMP (on Windows) or enable\_snmp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate traps. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate traps. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| enable\_nagios (on Linux only) | When set to true, Nagios alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| EnableWebhook (on Windows) or enable\_webhook (on Linux) | When set to true, Webhook alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| MaxWebhookRetries (on Windows) or max\_webhook\_retries (on Linux) | Set maximum number of times pemAgent should retry to call webhooks on failure. | Default 3. 
| -| ConnectTimeout (on Windows) or connect\_timeout (on Linux) | The maximum length of time (in seconds, written as a decimal integer string) that the agent will wait for a connection. | Not set by default. If set to 0, the agent will wait indefinitely. | -| AllowServerRestart (on Windows) or allow\_server\_restart (on Linux) | If set to TRUE, the agent can restart the database server that it monitors. Some PEM features may be enabled/disabled, depending on the value of this parameter. | By default, set to TRUE. | -| MaxConnections (on Windows) or max\_connections (on Linux) | The maximum number of probe connections used by the connection throttler. | By default, set to 0 (an unlimited number of connections). | -| ConnectionLifetime (on Windows) or connection\_lifetime (on Linux) | Use ConnectionLifetime (or connection\_lifetime) to specify the minimum number of seconds an open but idle connection is retained. This parameter is ignored if the value specified in MaxConnections is reached and a new connection (to a different database) is required to satisfy a waiting request. | By default, set to 0 (a connection is dropped when the connection is idle after the agent's processing loop completes a cycle in which the connection has not been used). | -| HeartbeatConnection (on Windows) or heartbeat\_connection (on Linux) | When set to TRUE, a dedicated connection used for sending the heartbeats. | By default, set to FALSE. | -| AllowBatchProbes (on Windows) or allow\_batch\_probes (on Linux) | If set to TRUE, the user will be able to create batch probes using custom probes feature. | By default, set to FALSE. | -| BatchScriptDir (on Windows) or batch\_script\_dir (on Linux) | Provide the path where script file (for alerting) will be stored. | On Windows, C:Usersuser\_nameAppDataLocalTemp. On Linux, set to /tmp. | -| AllowBatchJobSteps (on Windows) or batch\_script\_user | Provide the username who will run the script. | On Windows, set to TRUE and restart PEM Agent. 
Entries located in HKEY\_LOCAL\_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. On Linux, Restart the agent after modifying the file. If you do not specify a user, or the specified user does not exist, then the script will not be executed. | -| ConnectionCustomSetup (on Windows) or connection\_custom\_setup (on Linux) | Use this parameter to provide SQL code that will be invoked each time a new connection with the monitored server is established. | By default, no value is provided. | -| ca\_file (Linux only) | Provide the path where the CA certificate resides. | By default, /opt/PEM/agent/share/certs/ca-bundle.crt | -| WebhookSSLKey (on Windows) or webhook\_ssl\_key (on Linux) | The complete path to the webhook's SSL client key file. | | -| WebhookSSLCrt (on Windows) or webhook\_ssl\_crt (on Linux) | The complete path to the webhook's SSL client certificate file. | | -| WebhookSSLCaCrt (on Windows) or webhook\_ssl\_ca\_crt (on Linux) | The complete path to the webhook's SSL ca certificate file. | | -| WebhookSSLCrl (on Windows) or webhook\_ssl\_crl (on Linux) | The complete path of the CRL file to validate webhook server certificate. | | -| AllowInsecureWebhooks (on Windows) or allow\_insecure\_webhooks (on Linux) | When set to true, allow webhooks to call with insecure flag. 
| false | +| **Parameter Name** | **Description** | **Value (if applicable)** | +| ------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PEM_HOST (on Windows) or pem_host (on Linux) | The IP address or hostname of the PEM server. | By default, set to 127.0.0.1. | +| PEM_PORT (on Windows) or pem_port (on Linux) | The database server port to which the agent connects to communicate with the PEM server. | By default, the PEM server monitors port 5432. | +| AgentID (on Windows) or agent_id (on Linux) | A unique identifier assigned to the PEM agent. | The first agent is assigned an identifier of '1', the second agent is assigned an identifier of '2', and so on. | +| AgentUser (on Windows) or agent_user (on Linux) | User to connect the PEM database server | If present, and not set to empty string, it will be used to connect the PEM database server. | +| AgentCrtPath (on Windows) or agent_ssl_crt (on Linux) | The complete path to the PEM agent's certificate file. | By default, on Windows, C:\\Users\\user_name\\AppData\\Roaming\\pem/agent.crt. By default on Linux, /root/.pem/agent.crt. | +| AgentKeyPath (on Windows) or agent_ssl_key (on Linux) | The complete path to the PEM agent's key file. | By default, on Windows, C:\\Users\\user_name\\AppData\\Roaming\\pem/agent.key. By default on Linux, /root/.pem/agent.key. 
| +| AgentFlagDir (on Windows) or agent_flag_dir (on Linux) | Used for HA support. Specifies the directory path checked for requests to take over monitoring another server. Requests are made in the form of a file in the specified flag directory. | Not set by default. This option allows you to override the hard-coded default. | +| LogLevel (on Windows) or log_level (on Linux) | Log level specifies the type of event that will be written to the PEM log files. | Log level may be set to: error, debug1, debug2, or warning By default, log level is set to `warning` | +| log_location (on Linux only) | Specifies the location of the PEM worker log file. | On Linux, /var/log/pem/worker.log. On Windows, Logs & errors will be reported in the Application event log. | +| agent_log_location (on Linux only) | Specifies the location of the PEM agent log file. | On Linux, /var/log/pem/agent.log. On Windows, Logs & errors will be reported in the Application event log. | +| ShortWait (on Windows) or short_wait (on Linux) | The minimum length of time (in seconds) that the PEM agent will wait before checking which probes are next in the queue (waiting to run). | By default, 10 seconds. | +| LongWait (on Windows) or long_wait (on Linux) | The maximum length of time (in seconds) that the PEM agent will wait before attempting to connect to the PEM server if an initial connection attempt fails. | By default, 30 seconds. | +| AlertThreads (on Windows) or alert_threads (on Linux) | The number of alert threads to be spawned by the agent. | Set to 1 for the agent that resides on the host of the PEM server; should be set to 0 for all other agents. | +| EnableSMTP (on Windows) or enable_smtp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate emails. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate emails. 
| By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| EnableSNMP (on Windows) or enable_snmp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate traps. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate traps. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| enable_nagios (on Linux only) | When set to true, Nagios alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| EnableWebhook (on Windows) or enable_webhook (on Linux) | When set to true, Webhook alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| MaxWebhookRetries (on Windows) or max_webhook_retries (on Linux) | Set maximum number of times pemAgent should retry to call webhooks on failure. | Default 3. | +| ConnectTimeout (on Windows) or connect_timeout (on Linux) | The maximum length of time (in seconds, written as a decimal integer string) that the agent will wait for a connection. | Not set by default. If set to 0, the agent will wait indefinitely. | +| AllowServerRestart (on Windows) or allow_server_restart (on Linux) | If set to TRUE, the agent can restart the database server that it monitors. Some PEM features may be enabled/disabled, depending on the value of this parameter. | By default, set to TRUE. | +| MaxConnections (on Windows) or max_connections (on Linux) | The maximum number of probe connections used by the connection throttler. | By default, set to 0 (an unlimited number of connections). 
| +| ConnectionLifetime (on Windows) or connection_lifetime (on Linux) | Use ConnectionLifetime (or connection_lifetime) to specify the minimum number of seconds an open but idle connection is retained. This parameter is ignored if the value specified in MaxConnections is reached and a new connection (to a different database) is required to satisfy a waiting request. | By default, set to 0 (a connection is dropped when the connection is idle after the agent's processing loop completes a cycle in which the connection has not been used). | +| HeartbeatConnection (on Windows) or heartbeat_connection (on Linux) | When set to TRUE, a dedicated connection used for sending the heartbeats. | By default, set to FALSE. | +| AllowBatchProbes (on Windows) or allow_batch_probes (on Linux) | If set to TRUE, the user will be able to create batch probes using custom probes feature. | By default, set to FALSE. | +| BatchScriptDir (on Windows) or batch_script_dir (on Linux) | Provide the path where script file (for alerting) will be stored. | On Windows, C:Usersuser_nameAppDataLocalTemp. On Linux, set to /tmp. | +| AllowBatchJobSteps (on Windows) or batch_script_user | Provide the username who will run the script. | On Windows, set to TRUE and restart PEM Agent. Entries located in HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. On Linux, Restart the agent after modifying the file. If you do not specify a user, or the specified user does not exist, then the script will not be executed. | +| ConnectionCustomSetup (on Windows) or connection_custom_setup (on Linux) | Use this parameter to provide SQL code that will be invoked each time a new connection with the monitored server is established. | By default, no value is provided. | +| ca_file (Linux only) | Provide the path where the CA certificate resides. 
| By default, /opt/PEM/agent/share/certs/ca-bundle.crt | +| WebhookSSLKey (on Windows) or webhook_ssl_key (on Linux) | The complete path to the webhook's SSL client key file. | | +| WebhookSSLCrt (on Windows) or webhook_ssl_crt (on Linux) | The complete path to the webhook's SSL client certificate file. | | +| WebhookSSLCaCrt (on Windows) or webhook_ssl_ca_crt (on Linux) | The complete path to the webhook's SSL ca certificate file. | | +| WebhookSSLCrl (on Windows) or webhook_ssl_crl (on Linux) | The complete path of the CRL file to validate webhook server certificate. | | +| AllowInsecureWebhooks (on Windows) or allow_insecure_webhooks (on Linux) | When set to true, allow webhooks to call with insecure flag. | false |
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/02_pem_agent_ha.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/02_pem_agent_ha.mdx index 97af6a4e2d6..8620771e0d4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/02_pem_agent_ha.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/02_pem_agent_ha.mdx @@ -4,12 +4,11 @@ title: "High Availability Integration" - In high availability (HA) configurations, the database servers that are being monitored may be moved ("failed over") to a different host in the event of any problems, such as a hardware failure. There are numerous ways to maintain a backup server using features of Postgres and external tools. Please consult the Postgres documentation for further details. -In order to run in an HA environment, it is recommended that a PEM agent be installed on both the primary host machine, and any secondary machines that may be used as backups. The server is bound to the agent running on the primary host in the [normal fashion](../../02_toc_pem_agent/02_pem_agent_binding/#binding_pem_agent). +In order to run in an HA environment, it is recommended that a PEM agent be installed on both the primary host machine, and any secondary machines that may be used as backups. The server is bound to the agent running on the primary host in the [normal fashion](./#binding_pem_agent). -When the clustering solution initiates a failover of Postgres from one server to another, the PEM agent on the server that is taking over the running of the database may be instructed to take over the monitoring of the database server as well. The server must first be configured to allow "takeovers" using the `Allow takeover?` [configuration option](../../01_toc_pem_getting_started/#pem_define_connection) on the `PEM Agent` tab of the server configuration dialogue. 
+When the clustering solution initiates a failover of Postgres from one server to another, the PEM agent on the server that is taking over the running of the database may be instructed to take over the monitoring of the database server as well. The server must first be configured to allow "takeovers" using the `Allow takeover?` [configuration option](../../01_toc_pem_getting_started/07_pem_define_connection/#pem_define_connection) on the `PEM Agent` tab of the server configuration dialogue. To instruct the agent to takeover the monitoring of a server, the failover process must simply create a file in a special "flag" directory which will instruct the agent to take responsibility for the specified server. A command such as the following could be added to a failover script on a Linux server for example: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/index.mdx index 3b52e5c225e..a0111599d31 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/02_pem_agent_binding/index.mdx @@ -4,8 +4,7 @@ title: "Binding an Agent to a Server" - -The PEM agent runs as a service (on Windows) or as a daemon (on Linux), and is responsible for implementing scheduled tasks on the PEM server on behalf of the server. The PEM server installer automatically installs and configures an agent that is responsible for monitoring the PEM server. The PEM agent installer will setup and configure the agent to start automatically at boot time, however the agent can also be manually [started](../02_toc_pem_agent/#pem_agent_start_pem_agent) if required. +The PEM agent runs as a service (on Windows) or as a daemon (on Linux), and is responsible for implementing scheduled tasks on the PEM server on behalf of the server. 
The PEM server installer automatically installs and configures an agent that is responsible for monitoring the PEM server. The PEM agent installer will setup and configure the agent to start automatically at boot time, however the agent can also be manually [started](../03_pem_agent_start_pem_agent/#pem_agent_start_pem_agent) if required. To create a binding for a registered server, right click on the name of the server in the tree control, and select `Properties` from the context menu. Open the `PEM Agent` tab: @@ -23,7 +22,7 @@ Use fields on the `PEM Agent` tab to specify connection details for the PEM agen - Use the `SSL` field to specify an SSL operational mode. | Mode | Specify: | -|-------------|-------------------------------------------------------------------------------------------------------------------------------------------| +| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------- | | require | To require SSL encryption for transactions between the server and the agent. | | prefer | To use SSL encryption between the server and the agent if SSL encryption is available. | | allow | To allow the connection to use SSL if required by the server. | @@ -36,12 +35,12 @@ Use fields on the `PEM Agent` tab to specify connection details for the PEM agen - Use the `Database` field to specify the name of the Postgres Plus database to which the agent will initially connect. - Specify the name of the user that agent should use when connecting to the server in the `User name` field. Note that if the specified user is not a database superuser, then some of the features will not work as expected. If you are using Postgres version 10 or above, you can use the `pg_monitor` role to grant the required privileges to a non-superuser. For information about `pg_monitor` role, see [Default Roles](https://www.postgresql.org/docs/current/default-roles.html). 
- Specify the password that the agent should use when connecting to the server in the `Password` field, and verify it by typing it again in the `Confirm password` field. If you do not specify a password, you will need to configure the authentication for the agent manually; you can use a `.pgpass` file for example. -- Specify `Yes` in the `Allow takeover?` field to specify that the server may be "taken over" by another agent. This feature allows an agent to take responsibility for the monitoring of the database server if, for example, the server has been moved to another host as part of a [high availability](../02_toc_pem_agent/02_pem_agent_binding/#pem_agent_ha) failover process. +- Specify `Yes` in the `Allow takeover?` field to specify that the server may be "taken over" by another agent. This feature allows an agent to take responsibility for the monitoring of the database server if, for example, the server has been moved to another host as part of a [high availability](02_pem_agent_ha/#pem_agent_ha) failover process. Contents:
-pem\_agent\_config\_params pem\_agent\_ha +pem_agent_config_params pem_agent_ha
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/03_pem_agent_start_pem_agent.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/03_pem_agent_start_pem_agent.mdx index cb8c783ad35..1695315f6de 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/03_pem_agent_start_pem_agent.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/03_pem_agent_start_pem_agent.mdx @@ -4,7 +4,6 @@ title: "Controlling the PEM Agent" - On Linux platforms, the name of the service script that controls a PEM agent is `pemagent`. You can use the pemagent service script to control the PEM agent. Enter: > `/etc/init.d/pem_agent action` diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/04_pem_agent_ha.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/04_pem_agent_ha.mdx index 1f447792fdf..739e641c66d 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/04_pem_agent_ha.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/04_pem_agent_ha.mdx @@ -4,12 +4,11 @@ title: "High Availability Integration" - In high availability (HA) configurations, the database servers that are being monitored may be moved ("failed over") to a different host in the event of any problems, such as a hardware failure. There are numerous ways to maintain a backup server using features of Postgres and external tools. Please consult the Postgres documentation for further details. -In order to run in an HA environment, it is recommended that a PEM agent be installed on both the primary host machine, and any secondary machines that may be used as backups. The server is bound to the agent running on the primary host in the [normal fashion](../02_toc_pem_agent/02_pem_agent_binding/#binding_pem_agent). +In order to run in an HA environment, it is recommended that a PEM agent be installed on both the primary host machine, and any secondary machines that may be used as backups. 
The server is bound to the agent running on the primary host in the [normal fashion](02_pem_agent_binding/#binding_pem_agent). -When the clustering solution initiates a failover of Postgres from one server to another, the PEM agent on the server that is taking over the running of the database may be instructed to take over the monitoring of the database server as well. The server must first be configured to allow "takeovers" using the `Allow takeover?` [configuration option](../01_toc_pem_getting_started/#pem_define_connection) on the `PEM Agent` tab of the server configuration dialogue. +When the clustering solution initiates a failover of Postgres from one server to another, the PEM agent on the server that is taking over the running of the database may be instructed to take over the monitoring of the database server as well. The server must first be configured to allow "takeovers" using the `Allow takeover?` [configuration option](../01_toc_pem_getting_started/07_pem_define_connection/#pem_define_connection) on the `PEM Agent` tab of the server configuration dialogue. To instruct the agent to takeover the monitoring of a server, the failover process must simply create a file in a special "flag" directory which will instruct the agent to take responsibility for the specified server. 
A command such as the following could be added to a failover script on a Linux server for example: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/05_pem_agent_privileges.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/05_pem_agent_privileges.mdx index 8ad2546be97..adda32f835a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/05_pem_agent_privileges.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/05_pem_agent_privileges.mdx @@ -4,7 +4,6 @@ title: "PEM Agent Privileges" - By default, the PEM agent is installed with `root` privileges for the operating system host and superuser privileges for the database server. These privileges allow the PEM agent to invoke unrestricted probes on the monitored host and database server about system usage, retrieving and returning the information to the PEM server. Please note that PEM functionality diminishes as the privileges of the PEM agent decrease. For complete functionality, the PEM agent should run as `root` and on the same host as the database server. @@ -13,4 +12,19 @@ Please note that PEM functionality diminishes as the privileges of the PEM agent > - If the PEM agent is run under another lesser-privileged account, functionality will be limited even further. > - If the PEM agent is installed on a different host and is monitoring the database server remotely, then the functionality will be limited. -
Feature NameWorks with root UserWorks with non-root UserWorks with remote PEM Agent
Audit ManageryesThe Audit Log Manager may be unable to apply requested modifications if the service cannot be restarted. The user running PEM Agent may be different from the user who owns the data directory of the database server, so user running PEM Agent may not be able to change the configuration and also may not be able to restart the services of the database server.no
Capacity Manageryesyes

yes

NOTE: There will be no correlation between the database server and operating system metrics

Log ManageryesThe Log Manager may be unable to apply requested modifications if the service cannot be restarted. The user running PEM Agent may be different from the user who owns the data directory of the database server, so user running the PEM Agent may not be able to change the configuration and also may not be able to restart the services of the database server.no
Manage Alertsyesyes

yes

NOTE: When "run alert script on the database server" is selected, it will run on the machine where the bound PEM Agent is running, and not on the actual database server machine.

Manage Chartsyesyesyes
Manage DashboardsyesSome dashboards may not be able to show complete data. For example, columns such as swap usage, CPU usage, IO read, and IO write will be displayed as 0 in the session activity dashboard.Some dashboards may not be able to show complete data. For example, the operating system information of the database server will not be displayed as not available.
Manage ProbesyesSome of the PEM probes will not return information, and some of functionalities may be affected. For details about probe functionality, see the PEM Agent Guide.Some of the PEM probes will not return information, and some of the functionalities may be affected.
Postgres ExpertyesThe Postgres Expert will be able to access the configuration expert and schema expert, but not the security expert.The Expert will provide partial information as operating system information is not available.
Postgres Log Analysis ExpertyesThe Postgres Log Analysis Expert may not be able to do the analysis as it is dependent on the logs imported by log manager, which will not work as required.The Postgres Log Analysis Expert will not be able to do the analysis as it is dependent on the logs imported by log manager, which will not work as required.
Scheduled TasksyesFor Linux if user is the same as batch_script_user in agent.cfg then shell script will run.Scheduled tasks will work only for database server; scripts will run on a remote Agent.
Tuning WizardyesThe Tuning Wizard will be unable to run if the service cannot be restarted. The user running PEM Agent may be different from the user who owns the data directory of the database server, so user running PEM Agent may not be able to change the configuration and also may not be able to restart the services of the database server.no
System Reportsyesyesyes
Core Usage ReportsyesyesThe Core Usage report will not show complete information. For example, the platform, number of cores, and total RAM will not be displayed.
Managing BARTyesBART and the BART scanner may not be able to start/reload.

no

NOTE: BART requires passwordless authentication between the two machines where the database server and BART are installed.

+| **Feature Name** | **Works with root User** | **Works with non-root User** | **Works with remote PEM Agent** | +| ---------------------------- | ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Audit Manager | yes | The Audit Log Manager may be unable to apply requested modifications if the service cannot be restarted. The user running PEM Agent may be different from the user who owns the data directory of the database server, so user running PEM Agent may not be able to change the configuration and also may not be able to restart the services of the database server. | no | +| Capacity Manager | yes | yes | yes

NOTE: There will be no correlation between the database server and operating system metrics
| +| Log Manager | yes | The Log Manager may be unable to apply requested modifications if the service cannot be restarted. The user running PEM Agent may be different from the user who owns the data directory of the database server, so user running the PEM Agent may not be able to change the configuration and also may not be able to restart the services of the database server. | no | +| Manage Alerts | yes | yes | yes

NOTE: When "run alert script on the database server" is selected, it will run on the machine where the bound PEM Agent is running, and not on the actual database server machine.
| +| Manage Charts | yes | yes | yes | +| Manage Dashboards | yes | Some dashboards may not be able to show complete data. For example, columns such as swap usage, CPU usage, IO read, and IO write will be displayed as 0 in the session activity dashboard. | Some dashboards may not be able to show complete data. For example, the operating system information of the database server will not be displayed as not available. | +| Manage Probes | yes | Some of the PEM probes will not return information, and some of functionalities may be affected. For details about probe functionality, see the [PEM Agent Guide](/pem/latest/pem_agent/). | Some of the PEM probes will not return information, and some of the functionalities may be affected. | +| Postgres Expert | yes | The Postgres Expert will be able to access the configuration expert and schema expert, but not the security expert. | The Expert will provide partial information as operating system information is not available. | +| Postgres Log Analysis Expert | yes | The Postgres Log Analysis Expert may not be able to do the analysis as it is dependent on the logs imported by log manager, which will not work as required. | The Postgres Log Analysis Expert will not be able to do the analysis as it is dependent on the logs imported by log manager, which will not work as required. | +| Scheduled Tasks | yes | For Linux if user is the same as batch_script_user in agent.cfg then shell script will run. | Scheduled tasks will work only for database server; scripts will run on a remote Agent. | +| Tuning Wizard | yes | The Tuning Wizard will be unable to run if the service cannot be restarted. The user running PEM Agent may be different from the user who owns the data directory of the database server, so user running PEM Agent may not be able to change the configuration and also may not be able to restart the services of the database server. 
| no | +| System Reports | yes | yes | yes | +| Core Usage Reports | yes | yes | The Core Usage report will not show complete information. For example, the platform, number of cores, and total RAM will not be displayed. | +| Managing BART | yes | BART and the BART scanner may not be able to start/reload. | no

NOTE: BART requires passwordless authentication between the two machines where the database server and BART are installed.
| diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/06_pem_agent_config_params.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/06_pem_agent_config_params.mdx index c16f844165d..b405bb5acba 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/06_pem_agent_config_params.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/06_pem_agent_config_params.mdx @@ -4,48 +4,47 @@ title: "PEM Agent Configuration Parameters" +A number of user-configurable parameters and registry entries control the behavior of the PEM Agent. With the exception of the PEM_MAXCONN (or pem_maxconn) parameter, we strongly recommend against modifying any of the configuration parameters or registry entries listed below without first consulting EnterpriseDB support experts. -A number of user-configurable parameters and registry entries control the behavior of the PEM Agent. With the exception of the PEM\_MAXCONN (or pem\_maxconn) parameter, we strongly recommend against modifying any of the configuration parameters or registry entries listed below without first consulting EnterpriseDB support experts. 
- -> - On 32 bit Windows systems, PEM registry entries are located in HKEY\_LOCAL\_MACHINE\\Software\\EnterpriseDB\\PEM\\agent -> - On 64 bit Windows systems, PEM registry entries are located in HKEY\_LOCAL\_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent +> - On 32 bit Windows systems, PEM registry entries are located in HKEY_LOCAL_MACHINE\\Software\\EnterpriseDB\\PEM\\agent +> - On 64 bit Windows systems, PEM registry entries are located in HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent > - On Linux systems, PEM configuration options are stored in the agent.cfg file, located (by default) in /usr/edb/pem/agent/etc -| **Parameter Name** | **Description** | **Value (if applicable)** | -|----------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| PEM\_HOST (on Windows) or pem\_host (on Linux) | The IP address or hostname of the PEM server. | By default, set to 127.0.0.1. | -| PEM\_PORT (on Windows) or pem\_port (on Linux) | The database server port to which the agent connects to communicate with the PEM server. | By default, the PEM server monitors port 5432. | -| AgentID (on Windows) or agent\_id (on Linux) | A unique identifier assigned to the PEM agent. | The first agent is assigned an identifier of '1', the second agent is assigned an identifier of '2', and so on. 
| -| AgentUser (on Windows) or agent\_user (on Linux) | User to connect the PEM database server | If present, and not set to empty string, it will be used to connect the PEM database server. | -| AgentCrtPath (on Windows) or agent\_ssl\_crt (on Linux) | The complete path to the PEM agent's certificate file. | By default, on Windows, C:\\Users\\user\_name\\AppData\\Roaming\\pem/agent.crt. By default on Linux, /root/.pem/agent.crt. | -| AgentKeyPath (on Windows) or agent\_ssl\_key (on Linux) | The complete path to the PEM agent's key file. | By default, on Windows, C:\\Users\\user\_name\\AppData\\Roaming\\pem/agent.key. By default on Linux, /root/.pem/agent.key. | -| AgentFlagDir (on Windows) or agent\_flag\_dir (on Linux) | Used for HA support. Specifies the directory path checked for requests to take over monitoring another server. Requests are made in the form of a file in the specified flag directory. | Not set by default. This option allows you to override the hard-coded default. | -| LogLevel (on Windows) or log\_level (on Linux) | Log level specifies the type of event that will be written to the PEM log files. | Log level may be set to: error, debug1, debug2, or warning By default, log level is set to `warning` | -| log\_location (on Linux only) | Specifies the location of the PEM worker log file. | On Linux, /var/log/pem/worker.log. On Windows, Logs & errors will be reported in the Application event log. | -| agent\_log\_location (on Linux only) | Specifies the location of the PEM agent log file. | On Linux, /var/log/pem/agent.log. On Windows, Logs & errors will be reported in the Application event log. | -| ShortWait (on Windows) or short\_wait (on Linux) | The minimum length of time (in seconds) that the PEM agent will wait before checking which probes are next in the queue (waiting to run). | By default, 10 seconds. 
| -| LongWait (on Windows) or long\_wait (on Linux) | The maximum length of time (in seconds) that the PEM agent will wait before attempting to connect to the PEM server if an initial connection attempt fails. | By default, 30 seconds. | -| AlertThreads (on Windows) or alert\_threads (on Linux) | The number of alert threads to be spawned by the agent. | Set to 1 for the agent that resides on the host of the PEM server; should be set to 0 for all other agents. | -| EnableSMTP (on Windows) or enable\_smtp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate emails. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate emails. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| EnableSNMP (on Windows) or enable\_snmp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate traps. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate traps. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| enable\_nagios (on Linux only) | When set to true, Nagios alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| EnableWebhook (on Windows) or enable\_webhook (on Linux) | When set to true, Webhook alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | -| MaxWebhookRetries (on Windows) or max\_webhook\_retries (on Linux) | Set maximum number of times pemAgent should retry to call webhooks on failure. | Default 3. 
| -| ConnectTimeout (on Windows) or connect\_timeout (on Linux) | The maximum length of time (in seconds, written as a decimal integer string) that the agent will wait for a connection. | Not set by default. If set to 0, the agent will wait indefinitely. | -| AllowServerRestart (on Windows) or allow\_server\_restart (on Linux) | If set to TRUE, the agent can restart the database server that it monitors. Some PEM features may be enabled/disabled, depending on the value of this parameter. | By default, set to TRUE. | -| MaxConnections (on Windows) or max\_connections (on Linux) | The maximum number of probe connections used by the connection throttler. | By default, set to 0 (an unlimited number of connections). | -| ConnectionLifetime (on Windows) or connection\_lifetime (on Linux) | Use ConnectionLifetime (or connection\_lifetime) to specify the minimum number of seconds an open but idle connection is retained. This parameter is ignored if the value specified in MaxConnections is reached and a new connection (to a different database) is required to satisfy a waiting request. | By default, set to 0 (a connection is dropped when the connection is idle after the agent's processing loop completes a cycle in which the connection has not been used). | -| HeartbeatConnection (on Windows) or heartbeat\_connection (on Linux) | When set to TRUE, a dedicated connection used for sending the heartbeats. | By default, set to FALSE. | -| AllowBatchProbes (on Windows) or allow\_batch\_probes (on Linux) | If set to TRUE, the user will be able to create batch probes using custom probes feature. | By default, set to FALSE. | -| BatchScriptDir (on Windows) or batch\_script\_dir (on Linux) | Provide the path where script file (for alerting) will be stored. | On Windows, C:Usersuser\_nameAppDataLocalTemp. On Linux, set to /tmp. | -| AllowBatchJobSteps (on Windows) or batch\_script\_user | Provide the username who will run the script. | On Windows, set to TRUE and restart PEM Agent. 
Entries located in HKEY\_LOCAL\_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. On Linux, Restart the agent after modifying the file. If you do not specify a user, or the specified user does not exist, then the script will not be executed. | -| ConnectionCustomSetup (on Windows) or connection\_custom\_setup (on Linux) | Use this parameter to provide SQL code that will be invoked each time a new connection with the monitored server is established. | By default, no value is provided. | -| ca\_file (Linux only) | Provide the path where the CA certificate resides. | By default, /opt/PEM/agent/share/certs/ca-bundle.crt | -| WebhookSSLKey (on Windows) or webhook\_ssl\_key (on Linux) | The complete path to the webhook's SSL client key file. | | -| WebhookSSLCrt (on Windows) or webhook\_ssl\_crt (on Linux) | The complete path to the webhook's SSL client certificate file. | | -| WebhookSSLCaCrt (on Windows) or webhook\_ssl\_ca\_crt (on Linux) | The complete path to the webhook's SSL ca certificate file. | | -| WebhookSSLCrl (on Windows) or webhook\_ssl\_crl (on Linux) | The complete path of the CRL file to validate webhook server certificate. | | -| AllowInsecureWebhooks (on Windows) or allow\_insecure\_webhooks (on Linux) | When set to true, allow webhooks to call with insecure flag. 
| false | +| **Parameter Name** | **Description** | **Value (if applicable)** | +| ------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| PEM_HOST (on Windows) or pem_host (on Linux) | The IP address or hostname of the PEM server. | By default, set to 127.0.0.1. | +| PEM_PORT (on Windows) or pem_port (on Linux) | The database server port to which the agent connects to communicate with the PEM server. | By default, the PEM server monitors port 5432. | +| AgentID (on Windows) or agent_id (on Linux) | A unique identifier assigned to the PEM agent. | The first agent is assigned an identifier of '1', the second agent is assigned an identifier of '2', and so on. | +| AgentUser (on Windows) or agent_user (on Linux) | User to connect the PEM database server | If present, and not set to empty string, it will be used to connect the PEM database server. | +| AgentCrtPath (on Windows) or agent_ssl_crt (on Linux) | The complete path to the PEM agent's certificate file. | By default, on Windows, C:\\Users\\user_name\\AppData\\Roaming\\pem/agent.crt. By default on Linux, /root/.pem/agent.crt. | +| AgentKeyPath (on Windows) or agent_ssl_key (on Linux) | The complete path to the PEM agent's key file. | By default, on Windows, C:\\Users\\user_name\\AppData\\Roaming\\pem/agent.key. By default on Linux, /root/.pem/agent.key. 
| +| AgentFlagDir (on Windows) or agent_flag_dir (on Linux) | Used for HA support. Specifies the directory path checked for requests to take over monitoring another server. Requests are made in the form of a file in the specified flag directory. | Not set by default. This option allows you to override the hard-coded default. | +| LogLevel (on Windows) or log_level (on Linux) | Log level specifies the type of event that will be written to the PEM log files. | Log level may be set to: error, debug1, debug2, or warning By default, log level is set to `warning` | +| log_location (on Linux only) | Specifies the location of the PEM worker log file. | On Linux, /var/log/pem/worker.log. On Windows, Logs & errors will be reported in the Application event log. | +| agent_log_location (on Linux only) | Specifies the location of the PEM agent log file. | On Linux, /var/log/pem/agent.log. On Windows, Logs & errors will be reported in the Application event log. | +| ShortWait (on Windows) or short_wait (on Linux) | The minimum length of time (in seconds) that the PEM agent will wait before checking which probes are next in the queue (waiting to run). | By default, 10 seconds. | +| LongWait (on Windows) or long_wait (on Linux) | The maximum length of time (in seconds) that the PEM agent will wait before attempting to connect to the PEM server if an initial connection attempt fails. | By default, 30 seconds. | +| AlertThreads (on Windows) or alert_threads (on Linux) | The number of alert threads to be spawned by the agent. | Set to 1 for the agent that resides on the host of the PEM server; should be set to 0 for all other agents. | +| EnableSMTP (on Windows) or enable_smtp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate emails. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate emails. 
| By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| EnableSNMP (on Windows) or enable_snmp (on Linux) | When set to true for multiple PEM Agents (7.13 or lesser) and PEM backend database (9.4 or lesser) then it may send more duplicate traps. Whereas for PEM Agents (7.14 or higher) and PEM backend database (9.5 or higher) then it may send lesser duplicate traps. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| enable_nagios (on Linux only) | When set to true, Nagios alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| EnableWebhook (on Windows) or enable_webhook (on Linux) | When set to true, Webhook alerting is enabled. | By default, set to true for the agent that resides on the host of the PEM server; false for all other agents. | +| MaxWebhookRetries (on Windows) or max_webhook_retries (on Linux) | Set maximum number of times pemAgent should retry to call webhooks on failure. | Default 3. | +| ConnectTimeout (on Windows) or connect_timeout (on Linux) | The maximum length of time (in seconds, written as a decimal integer string) that the agent will wait for a connection. | Not set by default. If set to 0, the agent will wait indefinitely. | +| AllowServerRestart (on Windows) or allow_server_restart (on Linux) | If set to TRUE, the agent can restart the database server that it monitors. Some PEM features may be enabled/disabled, depending on the value of this parameter. | By default, set to TRUE. | +| MaxConnections (on Windows) or max_connections (on Linux) | The maximum number of probe connections used by the connection throttler. | By default, set to 0 (an unlimited number of connections). 
| +| ConnectionLifetime (on Windows) or connection_lifetime (on Linux) | Use ConnectionLifetime (or connection_lifetime) to specify the minimum number of seconds an open but idle connection is retained. This parameter is ignored if the value specified in MaxConnections is reached and a new connection (to a different database) is required to satisfy a waiting request. | By default, set to 0 (a connection is dropped when the connection is idle after the agent's processing loop completes a cycle in which the connection has not been used). | +| HeartbeatConnection (on Windows) or heartbeat_connection (on Linux) | When set to TRUE, a dedicated connection used for sending the heartbeats. | By default, set to FALSE. | +| AllowBatchProbes (on Windows) or allow_batch_probes (on Linux) | If set to TRUE, the user will be able to create batch probes using custom probes feature. | By default, set to FALSE. | +| BatchScriptDir (on Windows) or batch_script_dir (on Linux) | Provide the path where script file (for alerting) will be stored. | On Windows, C:Usersuser_nameAppDataLocalTemp. On Linux, set to /tmp. | +| AllowBatchJobSteps (on Windows) or batch_script_user | Provide the username who will run the script. | On Windows, set to TRUE and restart PEM Agent. Entries located in HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. On Linux, Restart the agent after modifying the file. If you do not specify a user, or the specified user does not exist, then the script will not be executed. | +| ConnectionCustomSetup (on Windows) or connection_custom_setup (on Linux) | Use this parameter to provide SQL code that will be invoked each time a new connection with the monitored server is established. | By default, no value is provided. | +| ca_file (Linux only) | Provide the path where the CA certificate resides. 
| By default, /opt/PEM/agent/share/certs/ca-bundle.crt | +| WebhookSSLKey (on Windows) or webhook_ssl_key (on Linux) | The complete path to the webhook's SSL client key file. | | +| WebhookSSLCrt (on Windows) or webhook_ssl_crt (on Linux) | The complete path to the webhook's SSL client certificate file. | | +| WebhookSSLCaCrt (on Windows) or webhook_ssl_ca_crt (on Linux) | The complete path to the webhook's SSL ca certificate file. | | +| WebhookSSLCrl (on Windows) or webhook_ssl_crl (on Linux) | The complete path of the CRL file to validate webhook server certificate. | | +| AllowInsecureWebhooks (on Windows) or allow_insecure_webhooks (on Linux) | When set to true, allow webhooks to call with insecure flag. | false |
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/07_pem_agent_self_registration.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/07_pem_agent_self_registration.mdx index b8e67da81f8..4e70ed3256c 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/07_pem_agent_self_registration.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/07_pem_agent_self_registration.mdx @@ -4,19 +4,31 @@ title: "PEM Agent Self Registration" - Each PEM agent must be `registered` with the PEM server. The registration process provides the PEM server with the information it needs to communicate with the agent. The PEM agent graphical installer supports agent self-registration, but you can use the `pemworker` utility to register the agent if you skip PEM agent registration during a graphical installation or use an RPM package to install a PEM agent. The RPM installer places the PEM worker utility in the `/usr/edb/pem/agent/bin` directory. Use the following commands to register an agent: -- **On Linux**: pemworker −−register-agent \[register-options\] -- **On Windows**: pemworker.exe REGISTER \[register-options\] +- **On Linux**: pemworker −−register-agent \[register-options] +- **On Windows**: pemworker.exe REGISTER \[register-options] The following information is required when registering an agent with the PEM Server; you will be prompted for information if it is not provided on the command line: -
ParametersCommand-line optionsOptionalDescriptionDefault Value
PEM Database Server Host−−pem-server <hostname/address>NoAddress/Host name of the PEM database server
PEM Admin User−−pem-user <username>NoPEM Admin User to connect to the PEM database server.
PEM Database Server Port−−pem-port <port number>YesPort on which PEM database server is running.5432
Agent Certificate Path−−cert-path <certificate path>YesPath, where certificates need to be created.On Linux, "~/.pem" On Windows, “%APPDATA%/pem”
Agent Display Name−−display-name <agent_name>YesDisplay name of the PEM Agent.System hostname
Agent Group−−group <group_name>YesThe name of the group in which the agent will be displayed.
Agent Team−−team <team_name>YesThe name of the group role that may access the PEM Agent.
Agent Owner−−owner <owner_name>YesThe name of the owner of the PEM Agent.
Force registration−−force-registrationYesForcefully registers the agent to the PEM server with the arguments provided. It can be used to override the existing agent configuration.
Enable Heartbeat−−enable-heartbeat-connectionYesAgent to use dedicated connection to update the heartbeat.false
Agent User−−pem-agent-userYes

Use this user to connect the PEM database server. Specify it when you would like to use a connection pooler between the PEM Agent and the PEM database server. It will generate the SSL certificates, which will be used by the pemworker to connect to the PEM database server, for this user instead of the default agent user.

NOTE: Specified user must be a member of 'pem_agent' role.

- -**NOTE:** You can use the `PEM_SERVER_PASSWORD` environment variable to set the password of the `PEM Admin User`. If the `PEM_SERVER_PASSWORD` is not set, the server will use the `PGPASSWORD` or `pgpass file` when connecting to the **PEM Database Server**. +| Parameters | Command-line options | Optional | Description | Default Value | +| ---------------------------- | ---------------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------- | +| **PEM Database Server Host** | −−pem-server <hostname/address> | No | Address/Host name of the PEM database server | | +| **PEM Admin User** | −−pem-user <username> | No | `PEM Admin User` to connect to the PEM database server. | | +| **PEM Database Server Port** | −−pem-port <port number> | Yes | Port on which PEM database server is running. | 5432 | +| **Agent Certificate Path** | −−cert-path <certificate path> | Yes | Path, where certificates need to be created. | On Linux, "~/.pem" On Windows, “%APPDATA%/pem” | +| **Agent Display Name** | −−display-name <agent_name> | Yes | Display name of the PEM Agent. | System hostname | +| **Agent Group** | −−group <group_name> | Yes | The name of the group in which the agent will be displayed. | | +| **Agent Team** | −−team <team_name> | Yes | The name of the group role that may access the PEM Agent. | | +| **Agent Owner** | −−owner <owner_name> | Yes | The name of the owner of the PEM Agent. | | +| **Force registration** | −−force-registration | Yes | Forcefully registers the agent to the PEM server with the arguments provided. 
It can be used to override the existing agent configuration. | | +| **Enable Heartbeat** | −−enable-heartbeat-connection | Yes | Agent to use dedicated connection to update the heartbeat. | false | +| **Agent User** | −−pem-agent-user | Yes | Use this user to connect the PEM database server. Specify it when you would like to use a connection pooler between the PEM Agent and the PEM database server. It will generate the SSL certificates, which will be used by the pemworker to connect to the PEM database server, for this user instead of the default agent user.

**NOTE:** Specified user must be a member of 'pem_agent' role.
| | + +!!! Note + You can use the `PEM_SERVER_PASSWORD` environment variable to set the password of the `PEM Admin User`. If the `PEM_SERVER_PASSWORD` is not set, the server will use the `PGPASSWORD` or `pgpass file` when connecting to the **PEM Database Server**. Example: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/08_pem_agent_server_registration.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/08_pem_agent_server_registration.mdx index 13f2b7d2d73..a6b8a0817d4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/08_pem_agent_server_registration.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/08_pem_agent_server_registration.mdx @@ -4,36 +4,54 @@ title: "Register/Unregister database server using PEM Agent" - You can use the `pemworker` utility to register a database server for monitoring. The RPM installer places the utility in the `/usr/edb/pem/agent/bin` directory. Use the following commands to register a server: -- **On Linux**: pemworker −−register-server \[register-server-options\] -- **On Windows**: pemworker.exe REGISTER-SERVER \[register-server-options\] +- **On Linux**: pemworker −−register-server \[register-server-options] +- **On Windows**: pemworker.exe REGISTER-SERVER \[register-server-options] -Use the parameters in the table that follow to provide connection information for a Postgres database server that you wish to register for monitoring by the PEM Server. Please note that the pg\_hba.conf file on the database server must be configured to allow connections from the PEM server. +Use the parameters in the table that follow to provide connection information for a Postgres database server that you wish to register for monitoring by the PEM Server. Please note that the pg_hba.conf file on the database server must be configured to allow connections from the PEM server. 
Properties that begin with −−asb (agent server binding) define the binding for an agent that does not reside on the same host as the database server. These properties are optional if you have a PEM agent installed on the host of the database server. You will be prompted for required information if you do not include it on the command line. -
ParametersCommand-line optionsOptionalDescriptionDefault Value
PEM Admin User−−pem-user <username>NoThe name of the PEM Admin User that will connect to the monitored server.
Server Host−−server-addr <host name/address>NoHost name/address of the monitored server.
Server Port−−server-port <port>NoPort on which database server is running.
Server Database−−server-database <name>NoThe database to which PEM will connect.
Server User−−server-user <name>NoThe database user role that will be used by the agent for monitoring purposes.
Server Service Name−−server-service-name <name>YesName of the system level service, which controls the operations like start, stop, restart, reload, etc. of the server.
Remote Monitoring?−−remote-monitoring <yes/no>Nono if the monitored server resides on the same machine as the bound PEM agent, yes if the agent is on another host.no
EDB Failover Manager

Cluster Name

−−efm-cluster-name <name>YesName of EDB Failover Manager Cluster associated with this server.
EDB Failover manager Installation Path−−efm-install-path <path>YesInstallation path of EDB Failover Manager associated with this server.
Server Display Name−−display-name <server_name>YesDisplay name of the registred server.System hostname
Host Name−−asb-host-name <name_of_host>YesThe name of the host to which the agent is connecting.The value specified by the −−server-addr property.
Host Port−−asb-host-port <port_number>YesThe port number that the agent will use when connecting to the database.The value specified by the −−server-port property.
Host DB−−asb-host-db <database_name>YesThe name of the database to which the agent will connect.The value specified by the −−server-database property.
Host User Name−−asb-host-user <database_user>YesThe database user name that the agent will supply when authenticating with the database.The value specified by the −−server-user property.
SSL Mode−−asb-ssl-mode <certificate path>YesType of SSL authentication that will be used for connections. Supported values include prefer, require, disable, verify-CA, verify-full.prefer
Server Group−−group <group_name>YesSpecify the name of the server group in which the server will be displayed.
Server Team−−team <team_name>YesSpecify the name of the group role that will be allowed to access the server.
Owner−−owner <owner_name>YesSpecify the name of the role that will own the monitored server.
- -Use the **PEM\_MONITORED\_SERVER\_PASSWORD** environment variable to set the password of the user of the database server which is to be registered. When registering the database server, the pemworker utility will bind the server to the `PEM Agent` associated with the pemworker utility. The PEM server will use the specified user name (`Server User`) and password specified in the **PEM\_MONITORED\_SERVER\_PASSWORD** environment variable when monitoring the database server. - -Use the **PEM\_SERVER\_PASSWORD** environment variable to provide the password of the user of the PEM database server. If the `PEM_SERVER_PASSWORD` is not set, the server will use the `PGPASSWORD` or `pgpass file` when connecting to the **PEM Database Server**. +| Parameters | Command-line options | Optional | Description | Default Value | +| ---------------------------------------------------------------------------------- | ------------------------------------ | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ | +| **PEM Admin User** | −−pem-user <username> | No | The name of the `PEM Admin User` that will connect to the monitored server. | | +| **Server Host** | −−server-addr <host name/address> | No | Host name/address of the monitored server. | | +| **Server Port** | −−server-port <port> | No | Port on which database server is running. | | +| **Server Database** | −−server-database <name> | No | The database to which PEM will connect. | | +| **Server User** | −−server-user <name> | No | The database user role that will be used by the agent for monitoring purposes. | | +| **Server Service Name** | −−server-service-name <name> | Yes | Name of the system level service, which controls the operations like start, stop, restart, reload, etc. of the server. 
| | +| **Remote Monitoring?** | −−remote-monitoring <yes/no> | No | `no` if the monitored server resides on the same machine as the bound PEM agent, `yes` if the agent is on another host. | no | +| - **EDB Failover Manager**

Cluster Name
| −−efm-cluster-name <name> | Yes | Name of EDB Failover Manager Cluster associated with this server. | | +| **EDB Failover manager** **Installation Path** | −−efm-install-path <path> | Yes | Installation path of EDB Failover Manager associated with this server. | | +| **Server Display Name** | −−display-name <server_name> | Yes | Display name of the registred server. | System hostname | +| **Host Name** | −−asb-host-name <name_of_host> | Yes | The name of the host to which the agent is connecting. | The value specified by the −−server-addr property. | +| **Host Port** | −−asb-host-port <port_number> | Yes | The port number that the agent will use when connecting to the database. | The value specified by the −−server-port property. | +| **Host DB** | −−asb-host-db <database_name> | Yes | The name of the database to which the agent will connect. | The value specified by the −−server-database property. | +| **Host User Name** | −−asb-host-user <database_user> | Yes | The database user name that the agent will supply when authenticating with the database. | The value specified by the −−server-user property. | +| **SSL Mode** | −−asb-ssl-mode <certificate path> | Yes | Type of SSL authentication that will be used for connections. Supported values include `prefer`, `require`, `disable`, `verify-CA`, `verify-full`. | prefer | +| **Server Group** | −−group <group_name> | Yes | Specify the name of the server group in which the server will be displayed. | | +| **Server Team** | −−team <team_name> | Yes | Specify the name of the group role that will be allowed to access the server. | | +| **Owner** | −−owner <owner_name> | Yes | Specify the name of the role that will own the monitored server. | | + +Use the **PEM_MONITORED_SERVER_PASSWORD** environment variable to set the password of the user of the database server which is to be registered. 
When registering the database server, the pemworker utility will bind the server to the `PEM Agent` associated with the pemworker utility. The PEM server will use the specified user name (`Server User`) and password specified in the **PEM_MONITORED_SERVER_PASSWORD** environment variable when monitoring the database server. + +Use the **PEM_SERVER_PASSWORD** environment variable to provide the password of the user of the PEM database server. If the `PEM_SERVER_PASSWORD` is not set, the server will use the `PGPASSWORD` or `pgpass file` when connecting to the **PEM Database Server**. ## To unregister a database server You can use the pemworker utility to unregister a server: -- **On Linux**: pemworker −−unregister-server \[unregister-server-options\] -- **On Windows**: pemworker.exe UNREGISTER-SERVER \[unregister-server-options\] +- **On Linux**: pemworker −−unregister-server \[unregister-server-options] +- **On Windows**: pemworker.exe UNREGISTER-SERVER \[unregister-server-options] Include the following information when unregistering a database server from the `PEM Server`; you will be prompted for required information if you do not include it on the command line: -| Parameters | Command-line options | Optional | Description | Default Value | -|--------------------|-----------------------------------------|----------|------------------------------------------------------|---------------| -| **PEM Admin User** | −−pem-user <username> | No | `PEM Admin User` to connect the PEM database server. | | -| **Server Host** | −−server-addr <host name/address> | No | Host name/address of the database server. | | -| **Server Port** | −−server-port <port> | No | Port on which database server is running. 
| | +| Parameters | Command-line options | Optional | Description | Default Value | +| ------------------ | ------------------------------------ | -------- | ---------------------------------------------------- | ------------- | +| **PEM Admin User** | −−pem-user <username> | No | `PEM Admin User` to connect the PEM database server. | | +| **Server Host** | −−server-addr <host name/address> | No | Host name/address of the database server. | | +| **Server Port** | −−server-port <port> | No | Port on which database server is running. | | The command will unregister the server from the `PEM Server` for the specified combination of `Server Host` and `Server Port`, which is being monitored by the `PEM Agent`. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/index.mdx index c4cfd279a20..f62a609ec08 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/02_toc_pem_agent/index.mdx @@ -4,14 +4,13 @@ title: "Managing a PEM Agent" - The PEM agent is responsible for implementing scheduled tasks on the PEM server on behalf of the server. The agent runs as a service (on Windows) or as a daemon (on Linux). The PEM server installer automatically installs and configures an agent that is responsible for monitoring the PEM server; you can use the PEM agent installer to add additional agents. Contents:
-pem\_agent\_properties pem\_agent\_binding pem\_agent\_start\_pem\_agent pem\_agent\_ha +pem_agent_properties pem_agent_binding pem_agent_start_pem_agent pem_agent_ha
@@ -19,6 +18,6 @@ The PEM agent has a number of configuration parameters that control agent behavi
-pem\_agent\_privileges pem\_agent\_config\_params pem\_agent\_self\_registration pem\_agent\_server\_registration +pem_agent_privileges pem_agent_config_params pem_agent_self_registration pem_agent_server_registration
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx index e1265e5743a..5d7b6ba2697 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx @@ -4,12 +4,11 @@ title: "PEM Main Browser Window" - The PEM client features a menu bar and a window divided into two panes: the `Browser` tree control in the left pane, and a tabbed browser in the right pane. ![PEM browser window](/../images/pem_browser_window.png) -[Menus](../03_toc_pem_client/#pem_menu_bar) displayed across the top of the browser window provide quick, context-sensitive access to PEM features and functionality. +[Menus](03_pem_menu_bar/#pem_menu_bar) displayed across the top of the browser window provide quick, context-sensitive access to PEM features and functionality. ## The PEM Client Object Browser @@ -22,11 +21,44 @@ Expand nodes in the tree control to display a hierarchical view of the database Right-click on a node of the tree control to access a context-sensitive menu and perform common tasks. Context menu options may include one or more of the following selections: -
OptionAction
Add named restore point

Click to create and enter the name of a restore point.

Backup...

Click to open the Backup... dialog to backup database objects.

Backup Globals...

Click to open the Backup Globals... dialog to backup cluster objects.

Backup Server...

Click to open the Backup Server... dialog to backup a server.

Connect Server

Click to establish a connection with the selected server.

Create

Click to access a context menu that provides context-sensitive selections.

Your selection opens a Create dialog for creating a new object.
CREATE Script

Click to open the Query tool to edit or view the CREATE script.

CREATE Script

Click to open the Query tool to edit or view the CREATE script.

Dashboards

Click through for quick access to PEM dashboards.

Delete/Drop

Click to delete the currently selected object from the server.

Disconnect Database...

Click to terminate a database connection.

Disconnect Server...

Click to refresh the currently selected object.

Drop Cascade

Click to delete the currently selected object and all dependent objects from the server.

Debugging

Click to access the Debugger tool.

Grant Wizard

Click to access the Grant Wizard tool.

Maintenance...

Click to open the Maintenance... dialog to VACUUM, ANALYZE, REINDEX, or CLUSTER.

Management

Click to access management tasks that are relevant to the node.

Properties...

Click to review or modify the currently selected object's properties.

Refresh...

Click to refresh the currently selected object.

Reload Configuration...

Click to update configuration files without restarting the server.

Restore...

Click to access the Restore dialog to restore database files from a backup.

View Data

Use the View Data option to access the data stored in a selected table with the Data Output tab of the Query Tool.

+| Option | Action | +| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Add named restore point` | Click to create and enter the name of a restore point.
| +| `Backup...` | Click to open the [Backup...](../05_toc_pem_management_basics/06_backup_dialog/#backup_dialog) dialog to backup database objects.
| +| `Backup Globals...` | Click to open the [Backup Globals...](../05_toc_pem_management_basics/07_backup_globals_dialog/#backup_globals_dialog) dialog to backup cluster objects.
| +| `Backup Server...` | Click to open the [Backup Server...](../05_toc_pem_management_basics/08_backup_server_dialog/#backup_server_dialog) dialog to backup a server.
| +| `Connect Server` | Click to establish a connection with the selected server.
| +| `Create` | Click to access a context menu that provides context-sensitive selections.Your selection opens a `Create` dialog for creating a new object. | +| `CREATE Script` | Click to open the [Query tool](05_keyboard_shortcuts/#query-tool) to edit or view the CREATE script.
| +| `CREATE Script` | Click to open the [Query tool](05_keyboard_shortcuts/#query-tool) to edit or view the CREATE script.
| +| `Dashboards` | Click through for quick access to PEM dashboards.
| +| `Delete/Drop` | Click to delete the currently selected object from the server.
| +| `Disconnect Database...` | Click to terminate a database connection.
| +| `Disconnect Server...`  | Click to disconnect from the selected server.
| +| `Drop Cascade` | Click to delete the currently selected object and all dependent objects from the server.
| +| `Debugging` | Click to access the [Debugger](05_keyboard_shortcuts/#debugger) tool.
| +| `Grant Wizard` | Click to access the [Grant Wizard](../05_toc_pem_management_basics/01_grant_wizard/#grant_wizard) tool.
| +| `Maintenance...` | Click to open the [Maintenance...](../05_toc_pem_management_basics/04_maintenance/01_maintenance_dialog/#maintenance_dialog) dialog to VACUUM, ANALYZE, REINDEX, or CLUSTER.
| +| `Management` | Click to access management tasks that are relevant to the node.
| +| `Properties...` | Click to review or modify the currently selected object's properties.
| +| `Refresh...` | Click to refresh the currently selected object.
| +| `Reload Configuration...` | Click to update configuration files without restarting the server.
| +| `Restore...` | Click to access the [Restore](../05_toc_pem_management_basics/09_restore_dialog/#restore_dialog) dialog to restore database files from a backup.
| +| `View Data` | Use the `View Data` option to access the data stored in a selected table with the `Data Output` tab of the `Query Tool`.
| The context-sensitive menus associated with `Tables` and nested `Table` nodes provides additional display options: -
OptionAction
Import/Export...

Click open the Import/Export... <import_export_data> dialog to import data to or export data from the selected table.

Reset Statistics

Click to reset statistics for the selected table.

Scripts

Click to open the Query tool to edit or view the selected script from the flyout menu.

Truncate

Click to remove all rows from a table.

Truncate Cascade

Click to remove all rows from a table and its child tables.

View First 100 Rows

Click to access the data grid that displays the first 100 rows of the selected table.

View Last 100 Rows

Click to access the data grid that displays the last 100 rows of the selected table.

View All Rows

Click to access the data grid that displays all rows of the selected table.

View Filtered Rows...

Click to access the Data Filter popup to apply a filter to a set of data.

+| Option | Action | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Import/Export...` | Click open the Import/Export... <import_export_data> dialog to import data to or export data from the selected table.
| +| `Reset Statistics` | Click to reset statistics for the selected table.
| +| `Scripts` | Click to open the [Query tool](05_keyboard_shortcuts/#query-tool) to edit or view the selected script from the flyout menu.
| +| `Truncate` | Click to remove all rows from a table.
| +| `Truncate Cascade` | Click to remove all rows from a table and its child tables.
| +| `View First 100 Rows` | Click to access [the data grid](../08_toc_pem_developer_tools/04_editgrid/#editgrid) that displays the first 100 rows of the selected table.
| +| `View Last 100 Rows` | Click to access [the data grid](../08_toc_pem_developer_tools/04_editgrid/#editgrid) that displays the last 100 rows of the selected table.
| +| `View All Rows` | Click to access [the data grid](../08_toc_pem_developer_tools/04_editgrid/#editgrid) that displays all rows of the selected table.
| +| `View Filtered Rows...` | Click to access the `Data Filter` popup to apply a filter to a set of data.
| ## The PEM Tabbed Browser Window @@ -36,7 +68,7 @@ The main panel of the PEM client contains a collection of tabs that display info The `Dashboard` tab is context-sensitive; when you navigate to the `Dashboard` tab from a server group or the `PEM Agents` node, the EDB Postgres `Welcome` window opens, allowing you to: -- Click the `Add New Server` icon to open the [Create - Server dialog](../01_toc_pem_getting_started/#pem_define_connection) to define a connection to a server. +- Click the `Add New Server` icon to open the [Create - Server dialog](../01_toc_pem_getting_started/07_pem_define_connection/#pem_define_connection) to define a connection to a server. - Click the `Configure PEM` icon to open the [Server Configuration dialog](../04_toc_pem_features/02_pem_server_config/#pem_server_config) and modify server parameters. - Click the `Getting Started` icon to open a new tab, displaying the PEM Getting Started Guide at the EnterpriseDB website. - Click the `EDB Website` icon to navigate to the home page of the EnterpriseDB website. The EnterpriseDB website features news about upcoming events and other projects. @@ -69,7 +101,7 @@ The `SQL` tab displays the SQL code used to generate the object currently select The `Statistics` tab displays the statistics gathered for each object on the tree control; the statistics displayed in the table vary by the type of object that is highlighted. Click a column heading to sort the table by the data displayed in the column; click again to reverse the sort order. The following table lists some of the statistics that may be displayed: | Panel | Description | -|---------------------------|------------------------------------------------------------------------------------------------------------| +| ------------------------- | ---------------------------------------------------------------------------------------------------------- | | `PID` | The process ID associated with the row. 
| | `User` | The name of the user that owns the object. | | `Database` | displays the database name. | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx index 75a6efec08f..f644511884d 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx @@ -4,11 +4,10 @@ title: "Browser Toolbar" - The browser toolbar provides shortcut buttons for frequently used features like View Data and the Query Tool which are most frequently used in PEM. This toolbar is visible on the Browser panel. Buttons get enabled/disabled based on the selected browser node. ![Browser Toolbar](/../images/pem_toolbar.png) -- Use the [Query Tool](../08_toc_pem_developer_tools/#query_tool) button to open the Query Tool in the current database context. +- Use the [Query Tool](05_keyboard_shortcuts/#query-tool) button to open the Query Tool in the current database context. - Use the [View Data](../08_toc_pem_developer_tools/04_editgrid/#editgrid) button to view/edit the data stored in a selected table. -- Use the [Filtered Rows](../08_toc_pem_developer_tools/04_editgrid/#viewdata_filter) button to access the Data Filter popup to apply a filter to a set of data for viewing/editing. +- Use the [Filtered Rows](../08_toc_pem_developer_tools/04_editgrid/01_viewdata_filter/#viewdata_filter) button to access the Data Filter popup to apply a filter to a set of data for viewing/editing. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx index 99564b0255d..10d7b0aa603 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx @@ -4,7 +4,6 @@ title: "The PEM Menu Bar" - The PEM menu bar provides access to commands and features that you can use to manage your database servers and the objects that reside on those servers. If an option is disabled: - The database server to which you are currently connected may not support the selected feature. @@ -19,12 +18,12 @@ Context-sensitive menus across the top of the PEM web interface allow you to cus Use the `File` menu to access the following options: -| Menu Option | Action | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Preferences | Click to open the [Preferences](../03_toc_pem_client/#preferences) dialog to customize your PEM client settings. | -| Lock Layout | Click to open a sub-menu to select the level for locking the UI layout. This can also be changed from the Browser -> Display settings tab [preferences](../03_toc_pem_client/#preferences). | -| Server Configuration | Click to open the Server Configuration dialog and update your PEM server configuration settings. | -| Reset Layout | If a workspace panel is popped out by mistake or intentionally it can be reset back to default using Reset Layout. 
| +| Menu Option | Action | +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Preferences | Click to open the [Preferences](04_preferences/#preferences) dialog to customize your PEM client settings. | +| Lock Layout | Click to open a sub-menu to select the level for locking the UI layout. This can also be changed from the Browser -> Display settings tab [preferences](04_preferences/#preferences). | +| Server Configuration | Click to open the Server Configuration dialog and update your PEM server configuration settings. | +| Reset Layout | If a workspace panel is popped out by mistake or intentionally it can be reset back to default using Reset Layout. | **The Object Menu** @@ -32,27 +31,27 @@ Use the `File` menu to access the following options: The `Object` menu is context-sensitive. Use the `Object` menu to access the following options: -| Menu Option | Action | -|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| -| Create | Click `Create` to access a context menu that provides context-sensitive selections. Your selection opens a `Create` dialog for creating a new object. | -| Refresh... | Click to refresh the currently selected object. | -| Connect Server | Click to open the [Connect to Server](../01_toc_pem_getting_started/#pem_connect_to_server) dialog to establish a connection with a server. | -| CREATE Script | Click to open the [Query tool](../08_toc_pem_developer_tools/#query_tool) to edit or view the selected script. | -| Disconnect Server/Database | Click to disconnect the selected server. | -| Remove Server | Click to remove the selected server from the browser tree. 
| -| BART | Click to access a context menu that provides options for removing BART configuration, taking a BART backup, or revalidate the BART configuration. | -| Clear Saved Password | If you have saved the database server password, click to clear the saved password. Enabled only after password is saved. | -| Clear SSH Tunnel Password | If you have saved the ssh tunnel password, click to clear the saved password. Enabled only after password is saved. | -| Drop Cascade | Click to delete the currently selected object and all dependent objects from the server. | -| Properties... | Click to review or modify the currently selected object's properties | -| Delete/Drop | Click to delete the currently selected object from the server. | -| Connect Database | Click to connect to selected database. | -| Trigger(s) | Click to `Disable` or `Enable` trigger(s) for the currently selected table. | -| Truncate | Click to remove all rows from a table (Truncate) or to remove all rows from a table and its child tables (Truncate Cascade). | -| View/Edit Data | Click to access a context menu that provides several options (All Rows, First 100 Rows, Last 100 Rows, Filtered Rows) for viewing data. | -| Count Rows | Click to count the number of rows of the selected table. | -| Reset Statistics | Click to reset the statistics of the selected table. | -| Scripts | Click to CREATE, DELETE, INSERT, SELECT and UPDATE script for the selected table. | +| Menu Option | Action | +| -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Create | Click `Create` to access a context menu that provides context-sensitive selections. Your selection opens a `Create` dialog for creating a new object. | +| Refresh... | Click to refresh the currently selected object. 
| +| Connect Server | Click to open the [Connect to Server](../01_toc_pem_getting_started/09_pem_connect_to_server/#pem_connect_to_server) dialog to establish a connection with a server. | +| CREATE Script | Click to open the [Query tool](05_keyboard_shortcuts/#query-tool) to edit or view the selected script. | +| Disconnect Server/Database | Click to disconnect the selected server. | +| Remove Server | Click to remove the selected server from the browser tree. | +| BART | Click to access a context menu that provides options for removing BART configuration, taking a BART backup, or revalidate the BART configuration. | +| Clear Saved Password | If you have saved the database server password, click to clear the saved password. Enabled only after password is saved. | +| Clear SSH Tunnel Password | If you have saved the ssh tunnel password, click to clear the saved password. Enabled only after password is saved. | +| Drop Cascade | Click to delete the currently selected object and all dependent objects from the server. | +| Properties... | Click to review or modify the currently selected object's properties | +| Delete/Drop | Click to delete the currently selected object from the server. | +| Connect Database | Click to connect to selected database. | +| Trigger(s) | Click to `Disable` or `Enable` trigger(s) for the currently selected table. | +| Truncate | Click to remove all rows from a table (Truncate) or to remove all rows from a table and its child tables (Truncate Cascade). | +| View/Edit Data | Click to access a context menu that provides several options (All Rows, First 100 Rows, Last 100 Rows, Filtered Rows) for viewing data. | +| Count Rows | Click to count the number of rows of the selected table. | +| Reset Statistics | Click to reset the statistics of the selected table. | +| Scripts | Click to CREATE, DELETE, INSERT, SELECT and UPDATE script for the selected table. | **The Management Menu** @@ -60,22 +59,22 @@ The `Object` menu is context-sensitive. 
Use the `Object` menu to access the foll Use the `Management` menu to access the following PEM features: -| Menu Option | Action | -|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------| -| Audit Manager... | Click to open the [Audit Manager](../04_toc_pem_features/#audit_manager) and configure auditing on your monitored servers. | -| Auto Discovery... | Click to open the [Auto Discovery](../01_toc_pem_getting_started/#auto_discovery_dialog) dialog to instruct a PEM agent to locate and bind monitored database servers. | -| Capacity Manager... | Click to open the [Capacity Manager](../04_toc_pem_features/08_capacity_manager/#capacity_manager) dialog and analyze historical or project future resource usage. | -| Log Manager... | Click to open the [Log Manager](../04_toc_pem_features/#log_manager) dialog and configure log collection for a server. | -| Manage Alerts... | Click to access the [Manage Alerts](../04_toc_pem_features/09_pem_alerting/#pem_alerting_dialog) tab and create or modify alerting behavior. | -| Manage Charts... | Click to open the [Manage Charts](../04_toc_pem_features/10_pem_manage_charts/#pem_manage_charts) tab to create or modify PEM charts. | -| Manage Dashboards... | Click to open the [Manage Dashboards](../04_toc_pem_features/11_pem_manage_dashboards/#pem_custom_dashboard) dialog to VACUUM, ANALYZE, REINDEX, or CLUSTER. | -| Manage Probes... | Click to open the [Manage Probes](../05_toc_pem_management_basics/04_maintenance/#maintenance_dialog) dialog to VACUUM, ANALYZE, REINDEX, or CLUSTER. | -| Postgres Expert... | Click to open the [Postgres Expert](../04_toc_pem_features/07_pem_postgres_expert/#pem_postgres_expert) wizard and perform a static analysis of your servers and databases. | -| Postgres Log Analysis Expert... 
| Click to access the [Postgres Log Analysis Expert](../04_toc_pem_features/#pem_log_analysis_expert) dialog analyze log file contents for usage trends. | -| Scheduled Tasks... | Click to open the [Scheduled Tasks](../04_toc_pem_features/18_pem_task_view/#pem_task_view) tab and review tasks that are pending or recently completed. | -| Schedule Alert Blackout... | Click to open the [Schedule Alert Blackout](../04_toc_pem_features/#pem_alert_blackout) dialog and schedule the alerts blackout for your servers and agents. | -| Tuning Wizard... | Click to open the [Tuning Wizard](../04_toc_pem_features/#tuning_wizard) dialog to generate a set of tuning recommendations for your server. | -| Reports | Click to open the [Reports](../04_toc_pem_features/#reports) dialog to generate the system configuration report and core usage report for your server. | +| Menu Option | Action | +| ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Audit Manager... | Click to open the [Audit Manager](../04_toc_pem_features/04_audit_manager/#audit_manager) and configure auditing on your monitored servers. | +| Auto Discovery... | Click to open the [Auto Discovery](../01_toc_pem_getting_started/06_auto_discovery_dialog/#auto_discovery_dialog) dialog to instruct a PEM agent to locate and bind monitored database servers. | +| Capacity Manager... | Click to open the [Capacity Manager](../04_toc_pem_features/08_capacity_manager/#capacity_manager) dialog and analyze historical or project future resource usage. | +| Log Manager... | Click to open the [Log Manager](../04_toc_pem_features/03_log_manager/#log_manager) dialog and configure log collection for a server. | +| Manage Alerts... 
| Click to access the [Manage Alerts](../04_toc_pem_features/09_pem_alerting/01_pem_alerting_dialog/#pem_alerting_dialog) tab and create or modify alerting behavior.                                          |
+| Manage Charts...                | Click to open the [Manage Charts](../04_toc_pem_features/10_pem_manage_charts/#pem_manage_charts) tab to create or modify PEM charts.                                                            |
+| Manage Dashboards...            | Click to open the [Manage Dashboards](../04_toc_pem_features/11_pem_manage_dashboards/01_pem_custom_dashboard/#pem_custom_dashboard) dialog to create, edit, or delete custom dashboards.        |
+| Manage Probes...                | Click to open the [Manage Probes](../05_toc_pem_management_basics/04_maintenance/01_maintenance_dialog/#maintenance_dialog) dialog to create or modify probes.                                   |
+| Postgres Expert...              | Click to open the [Postgres Expert](../04_toc_pem_features/07_pem_postgres_expert/#pem_postgres_expert) wizard and perform a static analysis of your servers and databases.                       |
+| Postgres Log Analysis Expert... | Click to access the [Postgres Log Analysis Expert](../04_toc_pem_features/05_pem_log_analysis_expert/#pem_log_analysis_expert) dialog to analyze log file contents for usage trends.             |
+| Scheduled Tasks...              | Click to open the [Scheduled Tasks](../04_toc_pem_features/18_pem_task_view/#pem_task_view) tab and review tasks that are pending or recently completed.                                          |
+| Schedule Alert Blackout...      | Click to open the [Schedule Alert Blackout](../04_toc_pem_features/13_pem_alert_blackout/#pem_alert_blackout) dialog and schedule the alerts blackout for your servers and agents.                |
+| Tuning Wizard...                | Click to open the [Tuning Wizard](../04_toc_pem_features/06_tuning_wizard/#tuning_wizard) dialog to generate a set of tuning recommendations for your server.                                     |
+| Reports                         | Click to open the [Reports](../04_toc_pem_features/21_reports/#reports) dialog to generate the system configuration report and core usage report for your server.                                
| **The Dashboards Menu** @@ -84,7 +83,7 @@ Use the `Management` menu to access the following PEM features: The `Dashboards` menu is context-sensitive; use the `Dashboards` menu to access the following options: | Menu Option | Action | -|-----------------------|---------------------------------------------------------------------------------------------| +| --------------------- | ------------------------------------------------------------------------------------------- | | Alerts | Click to open the Alerts Dashboard for the selected node. | | Audit Log | Click to open the Audit Log Analysis Dashboard for the selected node. | | Database Server | Click to open the Database Analysis Dashboard for the selected node. | @@ -108,22 +107,22 @@ The `Dashboards` menu is context-sensitive; use the `Dashboards` menu to access Use the `Tools` menu to access the following options: | Menu Option | Action | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Schema Diff | Click to open the [Schema Diff](../08_toc_pem_developer_tools/#schema_diff_feature) dialog to compare the schema objects between two database schemas. | +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Schema Diff | Click to open the [Schema Diff](../08_toc_pem_developer_tools/05_schema_diff/#schema_diff_feature) dialog to compare the schema objects between two database schemas. | | Search objects | Click to open the Search Objects dialog to search the database objects within a database. 
| | Server | Click to access the various server related tools such as Add Named Restore Point, Performance Diagnostics, Queue Server Startup, Queue Server Shutdown, Replace Cluster Primary, Switchover EFM Cluster and SQL Profiler. | -| Query Tool | Click to open the [Query tool](../08_toc_pem_developer_tools/#query_tool) for the currently selected object. | -| Storage Manager | Click to open the [Storage manager](../05_toc_pem_management_basics/#storage_manager) to upload, delete or download the backup files. | +| Query Tool | Click to open the [Query tool](05_keyboard_shortcuts/#query-tool) for the currently selected object. | +| Storage Manager | Click to open the [Storage manager](../05_toc_pem_management_basics/05_storage_manager/#storage_manager) to upload, delete or download the backup files. | | Reload Configuration | Click to update configuration files without restarting the server. | | Pause replay of WAL | Click to pause the replay of the WAL log. | | Resume replay of WAL | Click to resume the replay of the WAL log. | | Import/Export... | Click to open the Import/Export data... dialog to import or export data from a table. | | Maintenance... | Click to open the Maintenance... dialog to VACUUM, ANALYZE, REINDEX, or CLUSTER. | -| Backup... | Click to open the [Backup...](../05_toc_pem_management_basics/#backup_dialog) dialog to backup database objects. | -| Backup Globals... | Click to open the [Backup Globals...](../05_toc_pem_management_basics/#backup_globals_dialog) dialog to backup cluster objects. | -| Backup Server... | Click to open the [Backup Server...](../05_toc_pem_management_basics/#backup_server_dialog) dialog to backup a server. | -| Restore... | Click to access the [Restore](../05_toc_pem_management_basics/#restore_dialog) dialog to restore database files from a backup. | -| Grant Wizard... | Click to access the [Grant Wizard](../05_toc_pem_management_basics/#grant_wizard) tool. | +| Backup... 
| Click to open the [Backup...](../05_toc_pem_management_basics/06_backup_dialog/#backup_dialog) dialog to backup database objects. | +| Backup Globals... | Click to open the [Backup Globals...](../05_toc_pem_management_basics/07_backup_globals_dialog/#backup_globals_dialog) dialog to backup cluster objects. | +| Backup Server... | Click to open the [Backup Server...](../05_toc_pem_management_basics/08_backup_server_dialog/#backup_server_dialog) dialog to backup a server. | +| Restore... | Click to access the [Restore](../05_toc_pem_management_basics/09_restore_dialog/#restore_dialog) dialog to restore database files from a backup. | +| Grant Wizard... | Click to access the [Grant Wizard](../05_toc_pem_management_basics/01_grant_wizard/#grant_wizard) tool. | | Schedule Backup | Click to access the Schedule Backup dialog for BART backups. | **The Help Menu** @@ -133,7 +132,7 @@ Use the `Tools` menu to access the following options: Use the options on the `Help` menu to access online help documents or to review information about the PEM installation: | Menu Option | Action | -|-----------------------------------|----------------------------------------------------------------------------------| +| --------------------------------- | -------------------------------------------------------------------------------- | | Online Help | Click to open documentation for Postgres Enterprise Manager. | | REST API Reference | Click to open the REST API Reference. | | EnterpriseDB Website | Click to open the EnterpriseDB website in a browser window. 
| diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/04_preferences.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/04_preferences.mdx index 42da2162163..a707e873c81 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/04_preferences.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/04_preferences.mdx @@ -4,7 +4,6 @@ title: "PEM Client Preferences" - Use options on the `Preferences` dialog to customize the behavior of the PEM web interface. To open the dialog, select `Preferences` from the `File` menu. The left pane of the `Preferences` dialog displays a tree control; each node of the tree control provides access to options that are related to the node under which they are displayed. - Use the plus sign (+) to the left of a node name to expand a segment of the tree control. @@ -37,7 +36,11 @@ Use the fields on the `Display` panel to specify general display preferences: - Set `Show system objects?` to `True` to display system objects such as system schemas (for example, `pg_temp`) or system columns (for example, `xmin` or `ctid`) in the Browser tree control. - Use the `Lock layout` field to lock the UI layout at different levels. -
OptionAction
None

No locking. Every panel is resizable and dockable.

Prevent docking

This will disable the docking/undocking of the panels

Full

This will disable resizing, docking/undocking of the panels

+| Option | Action | +| ----------------- | ----------------------------------------------------------------- | +| `None` | No locking. Every panel is resizable and dockable.
| +| `Prevent docking` | This will disable the docking/undocking of the panels
| +| `Full` | This will disable resizing, docking/undocking of the panels
| - When the `Show system objects?` switch is set to `True`, the client will display system objects such as system schemas (for example, `pg_temp`) or system columns (for example, `xmin` or `ctid`) in the tree control. @@ -119,7 +122,7 @@ Expand the `Paths` node to specify the locations of supporting utility and help ![Preferences dialog - Binary path section](../images/preferences_paths_binary.png) -Use the fields on the `Binary paths` panel to specify the path to the directory that contains the utility programs (pg\_dump, pg\_restore, and pg\_dumpall) for monitored databases: +Use the fields on the `Binary paths` panel to specify the path to the directory that contains the utility programs (pg_dump, pg_restore, and pg_dumpall) for monitored databases: - Use the `EDB Advanced Server Binary Path` field to specify the location of the EDB Postgres Advanced Server utility programs. If this path is not set, pgAdmin will attempt to find the utilities in standard locations used by EnterpriseDB. - Use the `Greenplum Database Binary Path` field to specify the location of the Greenplum database utility programs. If this path is not set, pgAdmin will attempt to find the utilities in standard locations used by Greenplum. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/05_keyboard_shortcuts.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/05_keyboard_shortcuts.mdx index e0f6a74efe4..e348ccae3b2 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/05_keyboard_shortcuts.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/05_keyboard_shortcuts.mdx @@ -2,23 +2,21 @@ title: "Keyboard Shortcuts" --- - - -Keyboard shortcuts are provided in pgAdmin to allow easy access to specific functions. Alternate shortcuts can be configured through File > Preferences if desired.˝ +Keyboard shortcuts are provided in pgAdmin to allow easy access to specific functions. 
Alternate shortcuts can be configured through File > Preferences if desired.˝ ## Main Browser Window When using main browser window, the following keyboard shortcuts are available: | Shortcut for all platforms | Function | -|----------------------------|---------------------------------------------| +| -------------------------- | ------------------------------------------- | | Alt+Shift+F | Open the File menu | | Alt+Shift+O | Open the Object menu | | Alt+Shift+L | Open the Tools menu | | Alt+Shift+H | Open the Help menu | | Alt+Shift+B | Focus the browser tree | | Alt+Shift+\[ | Move tabbed panel backward | -| Alt+Shift+\] | Move tabbed panel forward | +| Alt+Shift+] | Move tabbed panel forward | | Alt+Shift+Q | Open the Query Tool in the current database | | Alt+Shift+V | View Data in the selected table/view | | Alt+Shift+C | Open the context menu | @@ -32,16 +30,16 @@ When using main browser window, the following keyboard shortcuts are available: Use the shortcuts below to navigate the tabsets on dialogs: | Shortcut for all platforms | Function | -|----------------------------|---------------------| +| -------------------------- | ------------------- | | Control+Shift+\[ | Dialog tab backward | -| Control+Shift+\] | Dialog tab forward | +| Control+Shift+] | Dialog tab forward | ## Property Grid Controls Use the shortcuts below when working with property grid controls: | Shortcut for all platforms | Function | -|----------------------------|------------------------------------------| +| -------------------------- | ---------------------------------------- | | Control+Shift+A | Add row in Grid | | Tab | Move focus to the next control | | Shift+Tab | Move focus to the previous control | @@ -53,7 +51,7 @@ Use the shortcuts below when working with property grid controls: When using the syntax-highlighting SQL editors, the following shortcuts are available: | Shortcut (Windows/Linux) | Shortcut (Mac) | Function | 
-|--------------------------|----------------------|-------------------------------------| +| ------------------------ | -------------------- | ----------------------------------- | | Alt + Left | Option + Left | Move to the beginning of the line | | Alt + Right | Option + Right | Move to the end of the line | | Ctrl + Alt + Left | Cmd + Option + Left | Move left one word | @@ -79,58 +77,58 @@ When using the syntax-highlighting SQL editors, the following shortcuts are avai When using the Query Tool, the following shortcuts are available: -| Shortcut (Windows/Linux) | Shortcut (Mac) | Function | -|--------------------------|-----------------------|---------------------------| -| F5 | F5 | Execute query | -| F6 | F6 | Save data changes | -| F7 | F7 | EXPLAIN query | -| Shift + F7 | Shift + F7 | EXPLAIN ANALYZE query | -| F8 | F8 | Execute query to CSV file | -| <accesskey> + o | <accesskey> + o | Open file | -| <accesskey> + s | <accesskey> + s | Save file | -| <accesskey> + n | <accesskey> + n | Find option drop down | -| <accesskey> + c | <accesskey> + c | Copy row(s) | -| <accesskey> + p | <accesskey> + p | Paste row(s) | -| <accesskey> + d | <accesskey> + d | Delete row(s) | -| <accesskey> + f | <accesskey> + f | Filter dialog | -| <accesskey> + i | <accesskey> + i | Filter options drop down | -| <accesskey> + r | <accesskey> + r | Row limit | -| <accesskey> + q | <accesskey> + q | Cancel query | -| <accesskey> + l | <accesskey> + l | Clear option drop down | -| <accesskey> + x | <accesskey> + x | Execute option drop down | -| <accesskey> + t | <accesskey> + t | Display connection status | -| <accesskey> + y | <accesskey> + y | Copy SQL on history panel | +| Shortcut (Windows/Linux) | Shortcut (Mac) | Function | +| ------------------------ | ------------------ | ------------------------- | +| F5 | F5 | Execute query | +| F6 | F6 | Save data changes | +| F7 | F7 | EXPLAIN query | +| Shift + F7 | Shift + F7 | EXPLAIN ANALYZE query | +| F8 | F8 | Execute query to 
CSV file | +| <accesskey> + o | <accesskey> + o | Open file | +| <accesskey> + s | <accesskey> + s | Save file | +| <accesskey> + n | <accesskey> + n | Find option drop down | +| <accesskey> + c | <accesskey> + c | Copy row(s) | +| <accesskey> + p | <accesskey> + p | Paste row(s) | +| <accesskey> + d | <accesskey> + d | Delete row(s) | +| <accesskey> + f | <accesskey> + f | Filter dialog | +| <accesskey> + i | <accesskey> + i | Filter options drop down | +| <accesskey> + r | <accesskey> + r | Row limit | +| <accesskey> + q | <accesskey> + q | Cancel query | +| <accesskey> + l | <accesskey> + l | Clear option drop down | +| <accesskey> + x | <accesskey> + x | Execute option drop down | +| <accesskey> + t | <accesskey> + t | Display connection status | +| <accesskey> + y | <accesskey> + y | Copy SQL on history panel | ## Debugger When using the Debugger, the following shortcuts are available: -| Shortcut (Windows/Linux) | Shortcut (Mac) | Function | -|--------------------------|-----------------------|------------------------------| -| <accesskey> + i | <accesskey> + i | Step in | -| <accesskey> + o | <accesskey> + o | Step over | -| <accesskey> + c | <accesskey> + c | Continue/Restart | -| <accesskey> + t | <accesskey> + t | Toggle breakpoint | -| <accesskey> + x | <accesskey> + x | Clear all breakpoints | -| <accesskey> + s | <accesskey> + s | Stop | -| Alt + Shift + q | Option + Shift + q | Enter or Edit values in Grid | +| Shortcut (Windows/Linux) | Shortcut (Mac) | Function | +| ------------------------ | ------------------ | ---------------------------- | +| <accesskey> + i | <accesskey> + i | Step in | +| <accesskey> + o | <accesskey> + o | Step over | +| <accesskey> + c | <accesskey> + c | Continue/Restart | +| <accesskey> + t | <accesskey> + t | Toggle breakpoint | +| <accesskey> + x | <accesskey> + x | Clear all breakpoints | +| <accesskey> + s | <accesskey> + s | Stop | +| Alt + Shift + q | Option + Shift + q | Enter or Edit values in Grid | ## Inner Tab 
and Panel Navigation When using the Query Tool and Debugger, the following shortcuts are available for inner panel navigation: | Shortcut (Windows/Linux) | Shortcut (Mac) | Function | -|--------------------------|-------------------|-------------------------------------| -| Alt + Shift + \] | Alt + Shift + \] | Move to next tab within a panel | +| ------------------------ | ----------------- | ----------------------------------- | +| Alt + Shift + ] | Alt + Shift + ] | Move to next tab within a panel | | Alt + Shift + \[ | Alt + Shift + \[ | Move to previous tab within a panel | | Alt + Shift + Tab | Alt + Shift + Tab | Move between inner panels | ## Access Key -<accesskey> is browser and platform dependant. The following table lists the default access keys for supported browsers. +<accesskey> is browser and platform dependant. The following table lists the default access keys for supported browsers. | Browser | Windows | Linux | Mac | -|-------------------|-------------|-------------|---------------| +| ----------------- | ----------- | ----------- | ------------- | | Internet Explorer | Alt | Alt | | | Chrome | Alt | Alt | Ctrl + Option | | Firefox | Alt + Shift | Alt + Shift | Ctrl + Option | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/06_search_objects.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/06_search_objects.mdx index 2a6c40ad11d..0cb25e66b92 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/06_search_objects.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/06_search_objects.mdx @@ -4,7 +4,6 @@ title: "Search objects" - Search objects dialog With this dialog, you can search for almost any kind of objects in a database. @@ -13,6 +12,6 @@ You can access it by right clicking a database or any of its child nodes and sel The minimum pattern length are 3 characters. 
The search performed is non-casesensitive and will find all objets whose name contains the pattern. You can only search for object names currently. Examples are: abc, %ab%, ab%c, %%%, etc.

-The result is presented in the grid with object name, object type and the object tree path in the browser tree. You can double click on a result row to select the object in the browser tree. If the object is greyed out, this means that you have not enabled those object types in the [preferences](../03_toc_pem_client/#preferences), so you can't double click on it. You can click on the ellipsis appended to the function and procedure names to see there arguments.
+The result is presented in the grid with object name, object type and the object tree path in the browser tree. You can double click on a result row to select the object in the browser tree. If the object is greyed out, this means that you have not enabled those object types in the [preferences](04_preferences/#preferences), so you can't double click on it. You can click on the ellipsis appended to the function and procedure names to see their arguments.

-You can filter based on a particular object type by selecting one from the object type dropdown. If the search button is hit when one of the object type is selected then only those types will be fetch from the database. An object type will not be visible in the dropdown if the database server does not support it or if it is not enabled from the [preferences](../03_toc_pem_client/#preferences).
+You can filter based on a particular object type by selecting one from the object type dropdown. If the search button is hit when one of the object types is selected then only those types will be fetched from the database. An object type will not be visible in the dropdown if the database server does not support it or if it is not enabled from the [preferences](04_preferences/#preferences).
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/index.mdx index 7f10e725d10..55b88c9a69e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/index.mdx @@ -4,7 +4,6 @@ title: "The PEM Client" - The Postgres Enterprise Manager client provides a powerful and intuitive user interface that you can use to manage Advanced Server and PostgreSQL databases. The client interface is easily customized, and will preserve your preferences between sessions. Client features include: > - auto-detection and support for objects discovered at run-time @@ -21,13 +20,13 @@ The PEM client features a highly-customizable display that features drag-and-dro The client tree control (the *Browser*) provides an elegant overview of the managed servers, and the objects that reside on each server. Right-click on a node within the tree control to access context-sensitive menus that provide quick access to management tasks for the selected object. The tabbed browser window provide quick access to statistical information about each object in the tree control, tools and utilities, and extended PEM features. The client opens an additional feature tab each time you access the extended functionality offered by PEM; you can open, close, and re-arrange tabs as needed. -You can search for objects in the database using the [Search objects](03_toc_pem_client/#search_objects) +You can search for objects in the database using the [Search objects](06_search_objects/#search_objects) Contents:
-pem\_browser\_window pem\_toolbar pem\_menu\_bar preferences keyboard\_shortcuts search\_objects +pem_browser_window pem_toolbar pem_menu_bar preferences keyboard_shortcuts search_objects
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/01_alerts_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/01_alerts_dashboard.mdx index 0b470767bc4..6df4c7245ca 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/01_alerts_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/01_alerts_dashboard.mdx @@ -4,12 +4,11 @@ title: "The Alerts Dashboard" - The Alerts Dashboard displays the currently triggered alerts; if opened from the Global Overview, the dashboard displays the current alerts for all monitored nodes on the system. If the Alerts Dashboard is opened from a node within a monitored hierarchy, the report will reflect alerts related to that node, and all monitored objects that reside below that object in the tree control. ![Alerts dashboard](../../images/alerts_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Alerts` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Alerts` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The `Alerts Dashboard` header includes the date and time that the page was last updated and a current count of triggered alerts. @@ -18,7 +17,7 @@ The `Alerts Overview` provides an overview of triggered alerts. 
The right-most b The `Alert Details` table lists the currently triggered alerts for the selected object; if opened from the global overview, the Alert Details table lists all of the currently triggered alerts for all monitored objects. Click a column heading to sort the table by the contents of a selected column; click a second time to reverse the sort order. The table contains detailed information about each alert: - An alert level icon displays in red for a `High` severity alert, in orange for a `Medium` severity alert, and in yellow for a `Low` severity alert. -- Use the arrow to the right of the alert level icon to access a dialog with detailed information about the alert. Within the dialog, the `Details` tab displays detailed information about the condition that triggered the alert; the `Parameters` tab displays the values of parameters used in the alert definition. Not all alerts return data that can be viewed on the `Details` dialog; for information about which templates display detailed metrics, please see the [alert templates list](../../04_toc_pem_features/09_pem_alerting/#pem_alert_templates) +- Use the arrow to the right of the alert level icon to access a dialog with detailed information about the alert. Within the dialog, the `Details` tab displays detailed information about the condition that triggered the alert; the `Parameters` tab displays the values of parameters used in the alert definition. 
Not all alerts return data that can be viewed on the `Details` dialog; for information about which templates display detailed metrics, please see the [alert templates list](../09_pem_alerting/03_pem_alert_templates/#pem_alert_templates) ![Alert details](../../images/alert_details.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/02_audit_log_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/02_audit_log_dashboard.mdx index d64ce92562e..0c93fc69fe3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/02_audit_log_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/02_audit_log_dashboard.mdx @@ -4,8 +4,7 @@ title: "The Audit Log Analysis Dashboard" - -The Audit Log Dashboard allows you to browse the audit logs that have been collected from Advanced Server instances which have enabled audit logging and collection with the [Audit Manager](../../04_toc_pem_features/#audit_manager). If the Audit Log Dashboard is opened from the Global level, it will display logs from all servers. If opened from the Agent level, it will show logs from all servers monitored by that Agent. If opened from the Server level, it will show logs from that server only. +The Audit Log Dashboard allows you to browse the audit logs that have been collected from Advanced Server instances which have enabled audit logging and collection with the [Audit Manager](../04_audit_manager/#audit_manager). If the Audit Log Dashboard is opened from the Global level, it will display logs from all servers. If opened from the Agent level, it will show logs from all servers monitored by that Agent. If opened from the Server level, it will show logs from that server only. 
![Audit Log analysis dashboard](../../images/audit_log_analysis_dashboard.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/03_database_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/03_database_analysis_dashboard.mdx index 16d9ebc7f84..fa70e3fa2f9 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/03_database_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/03_database_analysis_dashboard.mdx @@ -4,12 +4,11 @@ title: "The Database Analysis Dashboard" - The Database Analysis dashboard provides a high-level overview of database activity for the selected database, including a comparative storage analysis of the 5 largest tables/indexes, user activity analysis, weekly I/O analysis, and an activity analysis of the tables that reside in the selected database. ![Database analysis dashboard](../../images/database_analysis_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The Database Analysis dashboard header displays the date and time that the server started, the date and time that the Database Analysis dashboard was last updated, and the number of alerts currently triggered for the specified database (and monitored objects that reside within that database). 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/04_global_overview_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/04_global_overview_dashboard.mdx index 92fe60c83ac..774fccfef5b 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/04_global_overview_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/04_global_overview_dashboard.mdx @@ -4,12 +4,11 @@ title: "The Global Overview Dashboard" - Upon connecting to Postgres Enterprise Manager, the web interface displays the `Global Overview` dashboard. The Global Overview dashboard displays the status of each PEM server and agent, and calls your attention to any triggered alerts on monitored objects. ![Global Overview dashboard](../../images/global_overview.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The `Global Overview` header displays the date and time that the overview was last updated and the current number of triggered alerts. @@ -40,7 +39,7 @@ The `Postgres Server Status` table provides detailed information about the statu - The `Version` column lists the PostgreSQL version and build signature. - The `Remotely Monitored` column displays a `Yes` if the PEM agent that is bound to the monitored server does not reside on the same host as the server, and a `No` if the agent resides on the same host as the server. 
-Triggered alerts displayed in the `Alert Status` table include both PEM-defined alerts and user-defined alerts for all PEM-monitored hosts, servers, agents and database objects. The `Alert Status` table will also display an alert if an [agent or server is down](../../02_toc_pem_agent/#pem_agent_start_pem_agent). +Triggered alerts displayed in the `Alert Status` table include both PEM-defined alerts and user-defined alerts for all PEM-monitored hosts, servers, agents and database objects. The `Alert Status` table will also display an alert if an [agent or server is down](../../02_toc_pem_agent/03_pem_agent_start_pem_agent/#pem_agent_start_pem_agent). - The `Alarm Type` column reports the alert severity. An icon displays in red for a `High` severity alert, in yellow for a `Medium` severity alert, and in grey for a `Low` severity alert. - The `Object Description` column displays a description of the object that triggered the alert. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/05_io_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/05_io_analysis_dashboard.mdx index 87e3ba53b73..ce377d36bee 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/05_io_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/05_io_analysis_dashboard.mdx @@ -4,12 +4,11 @@ title: "The I/O Analysis Dashboard" - The I/O Analysis dashboard displays usage statistics for a specific database. ![IO Analysis dashboard](../../images/io_analysis_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. 
+Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The I/O Analysis dashboard header displays the date and time that the server started, the date and time that the I/O Analysis dashboard was last updated, and the number of alerts currently triggered for the specified database (and any monitored object that resides within that database). @@ -27,7 +26,7 @@ The graphs in the `I/O Overview` provide information about the week's activity f > The vertical key on the left side of the graph displays the checkpoint count. > -> > A checkpoint is a point in the transaction logging sequence at which all data files have been updated to reflect the information in the log, and data files are flushed to disk. Checkpoints can be automatically generated, or forced by use of the CHECKPOINT command. A timed checkpoint occurs when the checkpoints\_timeout parameter time limit is met. An untimed (requested) checkpoint occurs when the checkpoint\_segments parameter is met, or when a superuser issues the CHECKPOINT command. Frequent checkpointing can impose extra load on the server, but can reduce recovery time in the event of a crash or hardware failure. +> > A checkpoint is a point in the transaction logging sequence at which all data files have been updated to reflect the information in the log, and data files are flushed to disk. Checkpoints can be automatically generated, or forced by use of the CHECKPOINT command. A timed checkpoint occurs when the checkpoints_timeout parameter time limit is met. An untimed (requested) checkpoint occurs when the checkpoint_segments parameter is met, or when a superuser issues the CHECKPOINT command. 
Frequent checkpointing can impose extra load on the server, but can reduce recovery time in the event of a crash or hardware failure. The `Hot Tables/Indexes` section of the I/O Analysis dashboard provides an overview of the 5 most scanned tables and indexes that reside within the database. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/06_memory_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/06_memory_analysis_dashboard.mdx index ab5e904626e..1050cd4bd32 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/06_memory_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/06_memory_analysis_dashboard.mdx @@ -4,12 +4,11 @@ title: "The Memory Analysis Dashboard" - The `Memory Analysis` dashboard provides an overview of the memory usage for the selected server and server host for the previous week: ![Memory Analysis dashboard](../../images/memory_analysis_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The Memory Analysis dashboard header displays the date and time that the server was started, the date and time that the dashboard was last updated and the number of current alerts for objects monitored by the PEM server. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/07_object_activity_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/07_object_activity_analysis_dashboard.mdx index 91bbc3a55a8..e9d15e2f2ed 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/07_object_activity_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/07_object_activity_analysis_dashboard.mdx @@ -4,12 +4,11 @@ title: "The Objects Activity Analysis Dashboard" - The Objects Activity Analysis dashboard provides an overview of the size and activity of the objects that reside within the selected database. ![Object Activity Analysis dashboard](../../images/object_activity_analysis_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The Objects Activity Analysis dashboard header displays the date and time that the server started, the date and time that the Object Activity Analysis dashboard was last updated, and the number of alerts currently triggered for the specified database (and monitored objects that reside within that database). 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/08_os_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/08_os_analysis_dashboard.mdx index 755e08c67eb..b6f3866611e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/08_os_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/08_os_analysis_dashboard.mdx @@ -4,12 +4,11 @@ title: "The Operating System Analysis Dashboard" - The `Operating System Analysis` dashboard provides a graphical analysis of the resource usage on the system hosting the selected agent. ![Operating System Analysis dashboard](../../images/oper_system_analysis.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The `Operating System Analysis` dashboard header displays the date and time that the server was last booted, the date and time that the display was last updated, and the number of triggered alerts on the system. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/09_probe_log_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/09_probe_log_analysis_dashboard.mdx index 8e04935ca38..27fcf4fc810 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/09_probe_log_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/09_probe_log_analysis_dashboard.mdx @@ -4,14 +4,13 @@ title: "The Probe Log Analysis Dashboard" - The Probe Log Analysis dashboard displays error messages from the PEM agent. ![Probe Log Analysis dashboard](../../images/probe_log_analysis.png) The header information includes the date and time that the server was first started, the date and time that the page was last updated, and the current number of triggered alerts. -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The `Probe Log` table displays error messages returned by the PEM Agent. Entries in the Probe Log table may reflect incorrect agent binding information or authentication errors between the PEM agent and the server. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/10_server_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/10_server_analysis_dashboard.mdx index 682b45fbe75..54f546f1d1a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/10_server_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/10_server_analysis_dashboard.mdx @@ -4,14 +4,13 @@ title: "The Server Analysis Dashboard" - The Server Analysis dashboard provides a graphical analysis of a monitored server's usage statistics. ![Server Analysis dashboard](../../images/server_analysis.png) The Server Analysis dashboard header displays the date and time that the server was started, the date and time that the display was last updated, and the number of current alerts for items monitored by the PEM server. -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. 
Graphs within the `Storage` section of the dashboard provide an analysis of the space consumed by databases and tablespaces on the server: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/11_server_log_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/11_server_log_analysis_dashboard.mdx index 7677fd1d5cf..4b8a81d622c 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/11_server_log_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/11_server_log_analysis_dashboard.mdx @@ -4,7 +4,6 @@ title: "The Server Log Analysis Dashboard" - The `Server Log Analysis` dashboard displays the log files for the selected server. To view the `Server Log Analysis` dashboard, right-click on the name of a monitored server in the PEM client tree control, and navigate through the `Dashboards` menu, selecting `Server Log Analysis`. 
![Server Log Analysis dashboard](../../images/server_log_analysis_dashboard.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/12_session_activity_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/12_session_activity_analysis_dashboard.mdx index 89ba62d318f..05f7002078b 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/12_session_activity_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/12_session_activity_analysis_dashboard.mdx @@ -4,14 +4,13 @@ title: "The Session Activity Analysis Dashboard" - The Session Activity Analysis dashboard provides information about the session workload and lock activity for the selected server: ![Session Activity Analysis dashboard](../../images/session_activity_analysis_dashboard.png) The Session Activity Analysis dashboard header displays the date and time that the server was started, the date and time that the dashboard was last updated and the number of current alerts for the server. -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Session Activity Analysis` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM client `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Session Activity Analysis` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM client `Management` menu. The `Session Workload` table provides information about the current session workload for the server. Click a column heading to sort the table data by the selected column; click the heading a second time to reverse the sort order. 
The Session Workload table displays the following information: @@ -39,7 +38,7 @@ The `Session Lock Activity` table displays a list of locks held by processes on - The `Blocked By` column specifies the session ID of the session that is holding the lock. - The `Lock Type` column displays the type of lock that is held by the client. Lock Type may be: -> - `advisory` - a user-defined lock created by pg\_advisory\_lock() or pg\_advisory\_lock\_shared() +> - `advisory` - a user-defined lock created by pg_advisory_lock() or pg_advisory_lock_shared() > - `extend` - a lock held while extending a table or index > - `object` - a lock held on a database object > - `page` - a lock held on a page (within the shared buffer cache) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/13_session_waits_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/13_session_waits_dashboard.mdx index 35926cb7c0f..de526ac029a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/13_session_waits_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/13_session_waits_dashboard.mdx @@ -4,19 +4,18 @@ title: "The Session Waits Analysis Dashboard" - The `Session Wait Analysis` dashboard provides an overview of the current DRITA wait events for an Advanced Server session. For more information about DRITA wait events, please see the EDB Postgres Advanced Server Guide. ![Session Waits dashboard](../../images/session_waits_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Alerts` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. 
+Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Alerts` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The Session Wait Analysis dashboard header displays the date and time that the server started, the date and time that the dashboard was last updated, and the number of alerts currently triggered for the specified database (and monitored objects that reside within that database). The `Session Waits Overview` displays statistics gathered by the most recent execution of the PEM probe: - The `Session Waits By Number Of Waits` pie chart displays the 5 most frequently encountered wait events, per Advanced Server session. For more information about the events that can cause a wait event, see the EDB Postgres Advanced Server Guide. -- The `Session Waits By Time Waited` pie chart displays the 5 wait events that consume the most time, per Advanced Server session. To gather and display data in the `Session Time Waits by Time Waited` pie chart, you must modify the `postgresql.conf` file for the monitored server, setting *timed\_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. For more information about using Advanced Server DRITA timers and the events that can cause a wait event, please see the EDB Postgres Advanced Server Guide. +- The `Session Waits By Time Waited` pie chart displays the 5 wait events that consume the most time, per Advanced Server session. To gather and display data in the `Session Time Waits by Time Waited` pie chart, you must modify the `postgresql.conf` file for the monitored server, setting *timed_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. 
For more information about using Advanced Server DRITA timers and the events that can cause a wait event, please see the EDB Postgres Advanced Server Guide. The `Session Waits Details` table lists the current system wait events for the selected database. Click a column heading to sort the table by the column data; click again to reverse the sort order.The table displays: @@ -26,4 +25,4 @@ The `Session Waits Details` table lists the current system wait events for the s - The `Time (ms)` displays the number of milliseconds that the user waited for the specified event. - The `Wait Time (%)` column displays the percentage of the total wait time consumed by the specified wait event. -To gather and display data in the Time (ms) and Wait Time (%) columns, you must modify the `postgresql.conf` file for the monitored server, setting *timed\_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. For more information about using Advanced Server DRITA timers, please see the EDB Postgres Advanced Server Guide. +To gather and display data in the Time (ms) and Wait Time (%) columns, you must modify the `postgresql.conf` file for the monitored server, setting *timed_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. For more information about using Advanced Server DRITA timers, please see the EDB Postgres Advanced Server Guide. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/14_storage_analysis_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/14_storage_analysis_dashboard.mdx index 411207b9316..fcb9623fc10 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/14_storage_analysis_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/14_storage_analysis_dashboard.mdx @@ -4,12 +4,11 @@ title: "The Storage Analysis Dashboard" - The `Storage Analysis` dashboard provides information about the size of objects stored on the server and about available storage space on the server. ![Storage Analysis dashboard](../../images/storage_analysis_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The Storage Analysis dashboard header displays the date and time that the PEM server started, the date and time that the dashboard was most recently updated, and the number of triggered alerts on objects monitored by the PEM server. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/15_system_wait_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/15_system_wait_dashboard.mdx index cb4020ca842..d0c07518a78 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/15_system_wait_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/15_system_wait_dashboard.mdx @@ -4,19 +4,18 @@ title: "The System Wait Analysis Dashboard" - The System Wait Analysis dashboard provides an overview of the current DRITA wait events for an Advanced Server database. For more information about DRITA wait events, please see the EDB Postgres Advanced Server Guide. ![System Waits dashboard](../../images/system_waits_dashboard.png) -Use parameters on the [PEM Server Configurations](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Alerts` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. +Use parameters on the [PEM Server Configurations](../02_pem_server_config/#pem_server_config) dialog to specify the auto-refresh rate for the `Alerts` dashboard. To access the `Server Configuration` dialog, select `Server Configuration...` from the PEM web interface `Management` menu. The System Waits Analysis dashboard header displays the date and time that the server started, the date and time that the System Waits Analysis dashboard was last updated, and the number of alerts currently triggered for the specified database (and monitored objects that reside within that database). The `System Waits Overview` displays statistics gathered by the most recent execution of the PEM probe: - The `System Waits by Number of Waits` pie chart displays the 5 most frequently encountered wait events for the selected Advanced Server server. 
For more information about the events that can cause a wait event, see the EDB Postgres Advanced Server Guide. -- The `System Waits by Time Waited` pie chart displays the 5 wait events that consume the most time for the selected Advanced Server server. To gather and display data in the `System Waits by Time Waited` pie chart, you must modify the `postgresql.conf` file for the monitored server, setting *timed\_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. For more information about using Advanced Server DRITA timers, please see the EDB Postgres Advanced Server Guide. +- The `System Waits by Time Waited` pie chart displays the 5 wait events that consume the most time for the selected Advanced Server server. To gather and display data in the `System Waits by Time Waited` pie chart, you must modify the `postgresql.conf` file for the monitored server, setting *timed_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. For more information about using Advanced Server DRITA timers, please see the EDB Postgres Advanced Server Guide. The `System Waits Details` table lists the current system wait events for the selected server. Click a column heading to sort the table by the column data; click again to reverse the sort order.The table displays: @@ -27,4 +26,4 @@ The `System Waits Details` table lists the current system wait events for the se - The `Percent of Time Waited` displays the percentage of the total wait time consumed by this event. - The `Average Wait Time (ms)` column displays the average wait time for this event. -To gather and display data in the `Time Waited (ms)` and `Percent of Time Waited` columns, you must modify the `postgresql.conf` file for the monitored server, setting *timed\_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. 
For more information about using Advanced Server DRITA timers, please see the EDB Postgres Advanced Server Guide. +To gather and display data in the `Time Waited (ms)` and `Percent of Time Waited` columns, you must modify the `postgresql.conf` file for the monitored server, setting *timed_statistics = on*, and restart the server. Please note that this will cause server performance to degrade. For more information about using Advanced Server DRITA timers, please see the EDB Postgres Advanced Server Guide. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/16_str_replication_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/16_str_replication_dashboard.mdx index 93707db275b..ceb777332e7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/16_str_replication_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/16_str_replication_dashboard.mdx @@ -4,7 +4,6 @@ title: "The Streaming Replication Analysis Dashboard" - The `Streaming Replication Analysis` Dashboard displays statistical information about WAL activity for a monitored server. By default, replication probes are disabled; to view the `Streaming Replication Analysis` dashboard, you must enable probes on the primary and replica nodes. To enable the probes on the primary node, highlight the name of the primary server in the PEM client `Browser` tree control, and select `Manage Probes...` from the `Management` menu. 
Use the `Manage Probes` tab to enable the following probes: - Streaming Replication @@ -50,7 +49,7 @@ A label at the bottom of the dashboard confirms the status of the replication re ## Monitoring a Failover Manager Cluster -If you have configured PEM to monitor a [Failover Manager](../../04_toc_pem_features/#monitoring_a_failover_manager_cluster) cluster, the Streaming Replication Analysis dashboard will display tables that provide an overview of the clusters status and configuration, and information about each cluster member. To display cluster information on the Streaming Replication dashboard, you must provide the following information on the `Advanced` tab of the server `Properties` dialog for each node of the cluster: +If you have configured PEM to monitor a [Failover Manager](#monitoring-a-failover-manager-cluster) cluster, the Streaming Replication Analysis dashboard will display tables that provide an overview of the cluster's status and configuration, and information about each cluster member. To display cluster information on the Streaming Replication dashboard, you must provide the following information on the `Advanced` tab of the server `Properties` dialog for each node of the cluster: - Use the `EFM Cluster Name` field to specify the name of the Failover Manager cluster. The cluster name is the prefix of the name of the cluster properties file. For example, if your cluster properties file is named `efm.properties`, your cluster name is `efm`. - Use the `EFM Installation Path` field to specify the location of the Failover Manager binary file. By default, the Failover Manager binary file is installed in `/usr/edb/efm-3.1/bin`.
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/index.mdx index d1ec34aa910..ec8fb998f8e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/01_dashboards/index.mdx @@ -4,7 +4,6 @@ title: "Dashboards" - Postgres Enterprise Manager uses metrics (retrieved by probes) to generate the statistical information displayed on the dashboards. Dashboards are presented in a hierarchy comparable to the PEM client tree control; the dashboard for each object within the tree control displays the information for that object, as well as for any monitored object that resides below that level in the tree control, if appropriate. Each dashboard header displays the date and time that the server was started (if relevant), the date and time that the dashboard was last updated, and the current number of triggered alerts. Navigation menus displayed in the dashboard header provide easy access to other dashboards. Menus are organised hierarchically; only those menus appropriate for the object currently highlighted in the tree control are available: @@ -53,7 +52,7 @@ If displayed, click the information icon in the upper-left hand corner of a char Navigation menus in the dashboard header provide easy access to other dashboards. The menus are organized hierarchically, allowing you to jump from object to object at any level: -- The [Global Overview](../04_toc_pem_features/01_dashboards/#global_overview_dashboard) option opens the `Global Overview` dashboard. +- The [Global Overview](04_global_overview_dashboard/#global_overview_dashboard) option opens the `Global Overview` dashboard. - The `Agents` menu expands to display a list of agents. Select an agent from the list to access the `Operating System Analysis` dashboard for that agent. 
- The `Servers` menu expands to display a list of monitored servers. Select a server from the list to access the `Server Analysis` dashboard for that server. - The `Remote Servers` menu expands to display a list of servers that are monitored by a remote agent. Select a server from the list to access the `Server Analysis` dashboard for the server. @@ -62,33 +61,33 @@ Navigation menus in the dashboard header provide easy access to other dashboards ## Creating custom charts and dashboards -PEM (version 4.0 and above) allows you to create your own [Charts](../04_toc_pem_features/10_pem_manage_charts/#pem_create_new_chart) and [Dashboards](../04_toc_pem_features/11_pem_manage_dashboards/#pem_custom_dashboard), allowing you to tailor the interface to the requirements of your organization or individual responsibility. +PEM (version 4.0 and above) allows you to create your own [Charts](../10_pem_manage_charts/01_pem_create_new_chart/#pem_create_new_chart) and [Dashboards](../11_pem_manage_dashboards/01_pem_custom_dashboard/#pem_custom_dashboard), allowing you to tailor the interface to the requirements of your organization or individual responsibility. 
## Available Dashboards PEM offers the following dashboards: -- [Alerts Dashboard](../04_toc_pem_features/01_dashboards/#alerts_dashboard) -- [Audit Log Dashboard](../04_toc_pem_features/01_dashboards/#audit_log_dashboard) -- [Database Analysis Dashboard](../04_toc_pem_features/01_dashboards/#database_analysis_dashboard) -- [Global Overview Dashboard](../04_toc_pem_features/01_dashboards/#global_overview_dashboard) +- [Alerts Dashboard](01_alerts_dashboard/#alerts_dashboard) +- [Audit Log Dashboard](02_audit_log_dashboard/#audit_log_dashboard) +- [Database Analysis Dashboard](03_database_analysis_dashboard/#database_analysis_dashboard) +- [Global Overview Dashboard](04_global_overview_dashboard/#global_overview_dashboard) - I/O Analysis Dashboard <io\_analysis\_dashboard> -- [Memory Analysis Dashboard](../04_toc_pem_features/01_dashboards/#memory_analysis_dashboard) -- [Object Activity Analysis Dashboard](../04_toc_pem_features/01_dashboards/#object_activity_analysis_dashboard) -- [Operating System Analysis Dashboard](../04_toc_pem_features/01_dashboards/#os_analysis_dashboard) -- [Probe Log Analysis Dashboard](../04_toc_pem_features/01_dashboards/#probe_log_analysis_dashboard) -- [Server Analysis Dashboard](../04_toc_pem_features/01_dashboards/#server_analysis_dashboard) -- [Server Log Analysis Dashboard](../04_toc_pem_features/01_dashboards/#server_log_analysis_dashboard) -- [Session Activity Analysis Dashboard](../04_toc_pem_features/01_dashboards/#session_activity_analysis_dashboard) -- [Session Wait Analysis Dashboard](../04_toc_pem_features/01_dashboards/#session_waits_dashboard) -- [Storage Analysis Dashboard](../04_toc_pem_features/01_dashboards/#storage_analysis_dashboard) -- [System Wait Analysis Dashboard](../04_toc_pem_features/01_dashboards/#system_wait_dashboard) -- [Streaming Replication Analysis Dashboard](../04_toc_pem_features/01_dashboards/#str_replication_dashboard) +- [Memory Analysis 
Dashboard](06_memory_analysis_dashboard/#memory_analysis_dashboard) +- [Object Activity Analysis Dashboard](07_object_activity_analysis_dashboard/#object_activity_analysis_dashboard) +- [Operating System Analysis Dashboard](08_os_analysis_dashboard/#os_analysis_dashboard) +- [Probe Log Analysis Dashboard](09_probe_log_analysis_dashboard/#probe_log_analysis_dashboard) +- [Server Analysis Dashboard](10_server_analysis_dashboard/#server_analysis_dashboard) +- [Server Log Analysis Dashboard](11_server_log_analysis_dashboard/#server_log_analysis_dashboard) +- [Session Activity Analysis Dashboard](12_session_activity_analysis_dashboard/#session_activity_analysis_dashboard) +- [Session Wait Analysis Dashboard](13_session_waits_dashboard/#session_waits_dashboard) +- [Storage Analysis Dashboard](14_storage_analysis_dashboard/#storage_analysis_dashboard) +- [System Wait Analysis Dashboard](15_system_wait_dashboard/#system_wait_dashboard) +- [Streaming Replication Analysis Dashboard](16_str_replication_dashboard/#str_replication_dashboard) Contents:
-alerts\_dashboard audit\_log\_dashboard database\_analysis\_dashboard global\_overview\_dashboard io\_analysis\_dashboard memory\_analysis\_dashboard object\_activity\_analysis\_dashboard os\_analysis\_dashboard probe\_log\_analysis\_dashboard server\_analysis\_dashboard server\_log\_analysis\_dashboard session\_activity\_analysis\_dashboard session\_waits\_dashboard storage\_analysis\_dashboard system\_wait\_dashboard str\_replication\_dashboard +alerts_dashboard audit_log_dashboard database_analysis_dashboard global_overview_dashboard io_analysis_dashboard memory_analysis_dashboard object_activity_analysis_dashboard os_analysis_dashboard probe_log_analysis_dashboard server_analysis_dashboard server_log_analysis_dashboard session_activity_analysis_dashboard session_waits_dashboard storage_analysis_dashboard system_wait_dashboard str_replication_dashboard
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/01_pem_config_options.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/01_pem_config_options.mdx index f2b80f951d0..40d08dd1b55 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/01_pem_config_options.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/01_pem_config_options.mdx @@ -4,9 +4,142 @@ title: "Configuration Options" - -A number of aspects of PEM's behaviour can be controlled using global configuration options. Use the [Server Configuration dialogue](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) to manage Server Options. The configuration parameters used are listed below. +A number of aspects of PEM's behaviour can be controlled using global configuration options. Use the [Server Configuration dialogue](./#pem_server_config) to manage Server Options. The configuration parameters used are listed below. Please note that this list is subject to change. -
Parameter nameValue/UnitDescription
audit_log_retention_time30 daysSpecifies the number of days that an audit log will be retained on the PEM server.
auto_create_agent_alertstrueSpecifies whether to create default agent level alerts automatically when an agent is registered.
auto_create_server_alertstrueSpecifies whether to create default server level alerts automatically when a server is bound to an agent.
chart_disable_bulletsfalseEnable/disable bullets on line charts on dashboards and Capacity Manager reports.
cm_data_points_per_report50Specifies the number of data points to plot on charts on Capacity Manager reports.
cm_max_end_date_in_years5 yearsSpecifies the maximum amount of time that the Capacity Manager will extrapolate data for. Ensures that threshold-based end dates on reports do not get extrapolated indefinitely.
dash_alerts_timeout60 secondsSpecifies the number of seconds after which the components of the Alerts dashboard are auto-refreshed.
dash_db_comrol_span7 daysSpecifies the number of days worth of data to plot on the Commit/Rollback Analysis chart on the Database Analysis dashboard and Server Analysis dashboard.
dash_db_comrol_timeout1800 secondsSpecifies the number of seconds after which the Commits/Rollbacks line chart is auto-refreshed on the Database Analysis dashboard and Server Analysis dashboard.
dash_db_connovervw_timeout300 secondsSpecifies the number of seconds after which the Connection Overview pie chart is auto-refreshed in the Database Analysis dashboard.
dash_db_eventlag_span

7 days

Specifies the number of days worth of data to plot on the Number of Events Lag chart for slony replication on the Database Analysis dashboard.
dash_db_eventlag_timeout1800 secondsSpecifies the number of seconds after which the Number of Events Lag line chart for slony replication is auto-refreshed on the Database Analysis dashboard.
dash_db_hottable_rows25 rowsSpecifies the number of rows to show on the HOT Table Analysis table on the Database Analysis dashboard.
dash_db_hottable_timeout300 secondsSpecifies the number of seconds after which the Hot Tables table is auto-refreshed in the Database Analysis dashboard.
dash_db_io_span7 daysSpecifies the number of days worth of data to plot on the Database I/O Analysis chart on the Database Analysis dashboard and I/O Analysis dashboard.
dash_db_io_timeout1800 secondsSpecifies the number of seconds after which the Database I/O line chart is auto-refreshed on the Database Analysis dashboard and I/O Analysis dashboard.
dash_db_rowact_span7 daysSpecifies the number of days worth of data to plot on the Row Activity Analysis chart on the Database Analysis dashboard, the I/O Analysis dashboard, and the Server Analysis dashboard.
dash_db_rowact_timeout1800 secondsSpecifies the number of seconds after which the Row Activity line chart is auto-refreshed on the Database Analysis dashboard, the I/O Analysis dashboard, and the Server Analysis dashboard.
dash_db_storage_timeout300 secondsSpecifies the number of seconds after which the Storage bar chart is auto-refreshed in the Database Analysis dashboard.
dash_db_timelag_span7 daysSpecifies the number of days worth of data to plot on the Time Lag chart for Slony replication on the Database Analysis dashboard.
dash_db_timelag_timeout1800 secondsSpecifies the number of seconds after which the Time Lag line chart for slony replication is auto-refreshed on the Database Analysis dashboard.
dash_db_useract_span7 daysSpecifies the number of days worth of data to plot on the User Activity Analysis chart on the Database Analysis dashboard.
dash_db_useract_timeout1800 secondsSpecifies the number of seconds after which the User Activity line chart is auto-refreshed in the Database Analysis dashboard.
dash_efm_timeout300 secondsSpecifies the number of seconds after which the Failover Manager Node Status and Failover Manager Cluster Info line chart is auto-refreshed on the Streaming Replication dashboard.
dash_global_overview_timeout30 secondsSpecifies the number of seconds after which the components of the Global Overview dashboard are auto-refreshed.
dash_header_timeout60 secondsSpecifies the number of seconds after which the information on the header of all the dashboards are auto-refreshed.
dash_io_chkpt_span7 daysSpecifies the number of days worth of data to plot on the Checkpoints chart on the I/O Analysis dashboard.
dash_io_chkpt_timeout1800 secondsSpecifies the number of seconds after which the Checkpoints line chart is auto-refreshed on the I/O Analysis dashboard.
dash_io_hotindx_timeout300 secondsSpecifies the number of seconds after which the Hot Indexes bar chart is auto-refreshed on the I/O Analysis dashboard.
dash_io_hottbl_timeout300 secondsSpecifies the number of seconds after which the Hot Tables bar chart is auto-refreshed on the I/O Analysis dashboard.
dash_io_index_objectio_rows25 rowsSpecifies the number of rows displayed on the Index Activity table on the I/O Analysis dashboard and the Object Activity Analysis dashboard.
dash_io_index_objectio_timeout60 secondsSpecifies the number of seconds after which the Index Activity table is auto-refreshed on the I/O Analysis dashboard and the Object Activity Analysis dashboard.
dash_io_objectio_rows25 rowsSpecifies the number of rows displayed in the Object I/O Details table on the I/O Analysis dashboard and Object Activity Analysis dashboard.
dash_io_objectio_timeout300 secondsSpecifies the number of seconds after which the Object I/O Details table is auto-refreshed on the I/O Analysis dashboard and Object Activity Analysis dashboard.
dash_memory_hostmemact_span7 daysSpecifies the number of days worth of data to plot on the Host Memory Activity Analysis chart on the Memory Analysis dashboard.
dash_memory_hostmemact_timeout1800 secondsSpecifies the number of seconds after which the Host Memory Activity line chart is auto-refreshed on the Memory Analysis dashboard.
dash_memory_hostmemconf_timeout300 secondsSpecifies the number of seconds after which the Host Memory Configuration pie chart is auto-refreshed on the Memory Analysis dashboard and Server Analysis dashboard.
dash_memory_servmemact_span7 daysSpecifies the number of days worth of data to plot on the server Memory Activity Analysis chart on the Memory Analysis dashboard.
dash_memory_servmemact_timeout1800 secondsSpecifies the number of seconds after which the Server Memory Activity line chart is auto-refreshed on the Memory Analysis dashboard.
dash_memory_servmemconf_timeout300 secondsSpecifies the number of seconds after which the Server Memory Configuration pie chart is auto-refreshed on the Memory Analysis dashboard.
dash_objectact_objstorage_rows15 rowsSpecifies the number of rows to show on the Object Storage table on the Object Activity Analysis dashboard.
dash_objectact_objstorage_timeout300 secondsSpecifies the number of seconds after which the Object Storage table is auto-refreshed in the Object Activity Analysis dashboard.
dash_objectact_objtopindexes_timeout300 secondsSpecifies the number of seconds after which the Top 5 Largest Indexes bar chart is auto-refreshed in the Object Activity Analysis dashboard.
dash_objectact_objtoptables_timeout300 secondsSpecifies the number of seconds after which the Top 5 Largest Tables bar chart is auto-refreshed in the Object Activity Analysis dashboard.
dash_os_cpu_span7 daysSpecifies the number of days worth of data to plot on the CPU chart on the Operating System Analysis dashboard.
dash_os_cpu_timeout1800 secondsSpecifies the number of seconds after which the CPU line chart is auto-refreshed on the Operating System Analysis dashboard.
dash_os_data_span7 daysSpecifies the number of days worth of data to plot on the I/O line chart on the Operating System Analysis dashboard.
dash_os_disk_span7 daysSpecifies the number of days worth of data to plot on the Utilisation chart on the Operating System Analysis dashboard.
dash_os_hostfs_timeout1800 secondsSpecifies the number of seconds after which the Host File System Details table is auto-refreshed on the Operating System Analysis dashboard.
dash_os_io_timeout1800 secondsSpecifies the number of seconds after which the I/O line chart is auto-refreshed on the Operating System Analysis dashboard.
dash_os_memory_span7 daysSpecifies the number of days worth of data to plot on the Memory chart on the Operating System Analysis dashboard.
dash_os_memory_timeout1800 secondsSpecifies the number of seconds after which the Memory line chart is auto-refreshed on the Operating System Analysis dashboard.
dash_os_packet_span7 daysSpecifies the number of days worth of data to plot on the Packet chart on the Operating System Analysis dashboard.
dash_os_packet_timeout1800 secondsSpecifies the number of seconds after which the Network Packets line chart is auto-refreshed on the Operating System Analysis dashboard.
dash_os_process_span7 daysSpecifies the number of days worth of data to plot on the Process chart on the Operating System Analysis dashboard.
dash_os_process_timeout1800 secondsSpecifies the number of seconds after which the Process line chart is auto-refreshed on the Operating System Analysis dashboard.
dash_os_storage_timeout1800 secondsSpecifies the number of seconds after which the Storage pie chart is auto-refreshed on the Operating System Analysis dashboard.
dash_os_traffic_span7 daysSpecifies the number of days worth of data to plot on the Traffic chart on the Operating System Analysis dashboard.
dash_os_traffic_timeout1800 secondsSpecifies the number of seconds after which the Traffic line chart is auto-refreshed on the Operating System Analysis dashboard.
dash_os_util_timeout1800 secondsSpecifies the number of seconds after which the Utilisation line chart is auto-refreshed on the Operating System Analysis dashboard.
dash_probe_log_timeout300 secondsSpecifies the number of seconds after which the Probe Log table is auto-refreshed on the Probe Log Analysis dashboard.
dash_replication_archivestat_span7 daysSpecifies the number of days worth of data to plot on the WAL Archive Status chart on the Streaming Replication Analysis dashboard.
dash_replication_archivestat_timeout1800 secondsSpecifies the number of seconds after which the WAL Archive Status line chart is auto-refreshed on the Streaming Replication dashboard.
dash_replication_pagelag_span7 daysSpecifies the number of days worth of data to plot on the WAL Lag Pages chart on the Streaming Replication dashboard.
dash_replication_pagelag_timeout1800 secondsSpecifies the number of seconds after which the WAL Lag Pages line chart is auto-refreshed on the Streaming Replication dashboard.
dash_replication_segmentlag_span7 daysSpecifies the number of days worth of data to plot on the WAL Lag Segments chart on the Streaming Replication dashboard.
dash_replication_segmentlag_timeout1800 secondsSpecifies the number of seconds after which the WAL Lag Segments line chart is auto-refreshed on the Streaming Replication dashboard.
dash_replication_timelag_span7 daysSpecifies the number of days worth of data to plot on the Replication Lag Time chart on the Streaming Replication dashboard.
dash_replication_timelag_timeout1800 secondsSpecifies the number of seconds after which the Replication Lag Time line chart is auto-refreshed on the Streaming Replication dashboard.
dash_server_buffers_written168 hoursSpecifies the number of days worth of data to plot on the Background Writer Statistics chart on the Server Analysis dashboard.
dash_server_buffers_written_timeout300 secondsSpecifies the number of seconds after which the Background Writer Statistics line chart is auto-refreshed on the Server Analysis dashboard.
dash_server_connovervw_timeout300 secondsSpecifies the number of seconds after which the Connection Overview pie chart is auto-refreshed in the Server Analysis dashboard.
dash_server_database_timeout300 secondsSpecifies the number of seconds after which the Databases table is auto-refreshed in the Server Analysis dashboard.
dash_server_dbsize_span7 daysSpecifies the number of days worth of data to plot on the Database Size Analysis chart on the Server Analysis dashboard.
dash_server_dbsize_timeout1800 secondsSpecifies the number of seconds after which the Database Size line chart is auto-refreshed in the Server Analysis dashboard.
dash_server_disk_timeout1800 secondsSpecifies the number of seconds after which the Disk line chart is auto-refreshed in the Server Analysis dashboard.
dash_server_global_span7 daysSpecifies the number of days worth of data to plot on the Disk line chart on the Server Analysis dashboard.
dash_server_sharedbuff_span7 daysSpecifies the number of days worth of data to plot on the Shared Buffer chart on the Server Analysis dashboard.
dash_server_sharedbuff_timeout1800 secondsSpecifies the number of seconds after which the Shared Buffers line chart is auto-refreshed in the Server Analysis dashboard.
dash_server_tabspacesize_span7 daysSpecifies the number of days worth of data to plot on the Tablespace Size chart on the Server Analysis dashboard.
dash_server_tabspacesize_timeout1800 secondsSpecifies the number of seconds after which the Tablespace Size line chart is auto-refreshed in the Server Analysis dashboard.
dash_server_useract_span7 daysSpecifies the number of days worth of data to plot on the User Activity chart on the Server Analysis dashboard.
dash_server_useract_timeout1800 secondsSpecifies the number of seconds after which the User Activity line chart is auto-refreshed in the Server Analysis dashboard.
dash_sessact_lockact_timeout300 secondsSpecifies the number of seconds after which the Session Lock Activity table is auto-refreshed in the Session Activity Analysis dashboard.
dash_sessact_workload_timeout300 secondsSpecifies the number of seconds after which the Session Workload table is auto-refreshed in the Session Activity Analysis dashboard.
dash_sess_waits_nowaits_timeout300 secondsSpecifies the number of seconds after which the Session Waits By Number Of Waits pie chart is auto-refreshed in the Session Waits Analysis dashboard.
dash_sess_waits_timewait_timeout300 secondsSpecifies the number of seconds after which the Session Waits By Time Waited pie chart is auto-refreshed in the Session Waits Analysis dashboard.
dash_sess_waits_waitdtl_timeout300 secondsSpecifies the number of seconds after which the Session Waits Details table is auto-refreshed in the Session Waits Analysis dashboard.
dash_storage_dbdtls_timeout300 secondsSpecifies the number of seconds after which the Database Details table is auto-refreshed in the Storage Analysis dashboard.
dash_storage_dbovervw_timeout300 secondsSpecifies the number of seconds after which the Database Overview pie chart is auto-refreshed in the Storage Analysis dashboard.
dash_storage_hostdtls_timeout300 secondsSpecifies the number of seconds after which the Host Details table is auto-refreshed in the Storage Analysis dashboard.
dash_storage_hostovervw_timeout300 secondsSpecifies the number of seconds after which the Host Overview pie chart is auto-refreshed in the Storage Analysis dashboard.
dash_storage_tblspcdtls_timeout300 secondsSpecifies the number of seconds after which the Tablespace Details table is auto-refreshed in the Storage Analysis dashboard.
dash_storage_tblspcovervw_timeout300 secondsSpecifies the number of seconds after which the Tablespace Overview pie chart is auto-refreshed in the Storage Analysis dashboard.
dash_sys_waits_nowaits_timeout300 secondsSpecifies the number of seconds after which the System Waits By Number Of Waits pie chart is auto-refreshed in the System Waits Analysis dashboard.
dash_sys_waits_timewait_timeout300 secondsSpecifies the number of seconds after which the System Waits By Time Waited pie chart is auto-refreshed in the System Waits Analysis dashboard.
dash_sys_waits_waitdtl_timeout300 secondsSpecifies the number of seconds after which the System Waits Details table is auto-refreshed in the System Waits Analysis dashboard.
deleted_charts_retention_time7 daysSpecifies the number of days that a custom chart (displayed on a user-defined dashboard) is stored.
deleted_probes_retention_time7 daysSpecifies the number of days that a custom probe (displayed on a user-defined dashboard) is stored.
download_chart_formatjpegSpecifies the format in which a downloaded chart will be stored. May be jpeg or png.
flapping_detection_state_change3

Specifies the number of state changes detected within a specified interval to define a given alert as flapping.

  • Flapping starts when more than N state changes have occurred over [ N + 1 * (min(probe_interval) * 2)] minutes and the final state is not None, where the default value of N is 2 or 3, and min(probe_interval) is the smallest interval for all the probes used by the alert.
  • Flapping ends when ZERO state changes have occurred over [2 N * min(probe_interval)] minutes.
job_retention_time30 daysSpecifies the number of days that non-recurring scheduled tasks and their associated logs are retained, after their execution time.
long_running_transaction_minutes5 minutesSpecifies the number of minutes a query executes for before being considered long running.
nagios_cmd_file_name<file_name>Specifies the nagios command file to which the passive service check result will be sent.
nagios_enabledtSpecifies whether alert notification will be submitted to nagios or not.
nagios_medium_alert_as_criticalfSpecifies whether medium level PEM alert will be considered as critical in nagios.
nagios_spool_retention_time7 daysSpecifies the number of days to retain nagios messages in the spool table before they are discarded.
reminder_notification_interval24 hoursSpecifies the number of hours after which a reminder email is sent in case an alert has not been cleared.
server_log_retention_time30 daysSpecifies the number of days that the server log is retained on the PEM server.
show_data_tab_on_graphfalseIf 'true', a Data tab is added to each graph. Select the Data tab to review the data that is plotted on the graph.
smtp_authenticationfalseSpecifies whether to enable/disable authentication over SMTP.
smtp_enabledtrueSpecifies whether to enable/disable sending of emails.
smtp_encryptionfalseSpecifies whether to send SMTP email using an encrypted connection.
smtp_passwordSpecifies the password to be used to connect to the SMTP server.
smtp_port25Specifies the SMTP server port to be used for sending email.
smtp_server127.0.0.1Specifies the SMTP server host address to be used for sending email.
smtp_spool_retention_time7 daysSpecifies the number of days to retain sent email messages in the spool table before they are discarded.
smtp_usernameSpecifies the username to be used to connect to SMTP server.
snmp_communitypublicSpecifies the SNMP community used when sending traps. Used only with SNMPv1 and SNMPv2.
snmp_enabledtrueSpecifies whether to enable/disable sending SNMP traps.
snmp_port162Specifies the SNMP server port to be used for sending SNMP traps.
snmp_server127.0.0.1Specifies the SNMP server host address to be used for sending SNMP traps.
snmp_spool_retention_time7 daysSpecifies the number of days to retain sent traps in the spool table before they are discarded.
snmp_security_nameSpecifies the user name or security name for sending SNMP traps. Used only with SNMPv3.
snmp_security_engine_idSpecifies the Engine id of the SNMP Agent on the SNMP Server. Used only with SNMPv3.
snmp_security_levelNOAUTH_NOPRIVSpecifies the security level. Possible values are: AUTH_NOPRIV - Authentication, No Privacy; AUTH_PRIV - Authentication, Privacy; NOAUTH_NOPRIV - No Authentication, No Privacy. Used only with SNMPv3.
snmp_context_nameSpecifies the Context name, the identifier for MIB objects when sending SNMP traps. Used only with SNMPv3.
snmp_context_engine_idSpecifies the Context engine id, the identifier for MIB objects when sending SNMP traps. If not specified, snmp_security_engine_id will be used. Used only with SNMPv3.
snmp_authentication_protocolNONESpecifies the authentication type for SNMP traps. Its possible values can be NONE, HMACMD5 or HMACSHA. Used only with SNMPv3.
snmp_privacy_protocolNONESpecifies the privacy protocol for SNMP traps. Its possible values can be NONE, DES, AES128, IDEA, AES192, or AES256. Used only with SNMPv3.
snmp_authentication_passwordSpecifies the authentication password associated with security name mentioned in snmp_security_name. Used only for SNMPv3.
snmp_privacy_passwordSpecifies the privacy password associated with security name mentioned in snmp_security_name. Used only for SNMPv3.
webclient_help_pgEnterpriseDB hosted documentationSpecifies the location of the online PostgreSQL core documentation.
+| Parameter name | Value/Unit | Description | +| ------------------------------------ | ------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| audit_log_retention_time | 30 days | Specifies the number of days that an audit log will be retained on the PEM server. | +| auto_create_agent_alerts | true | Specifies whether to create default agent level alerts automatically when an agent is registered. | +| auto_create_server_alerts | true | Specifies whether to create default server level alerts automatically when a server is bound to an agent. | +| chart_disable_bullets | false | Enable/disable bullets on line charts on dashboards and Capacity Manager reports. | +| cm_data_points_per_report | 50 | Specifies the number of data points to plot on charts on Capacity Manager reports. | +| cm_max_end_date_in_years | 5 years | Specifies the maximum amount of time that the Capacity Manager will extrapolate data for. Ensures that threshold-based end dates of on reports do not get extrapolated indefinitely. | +| dash_alerts_timeout | 60 seconds | Specifies the number of seconds after which the components of the Alerts dashboard are auto-refreshed. | +| dash_db_comrol_span | 7 days | Specifies the number of days worth of data to plot on the Commit/Rollback Analysis chart on the Database Analysis dashboard and Server Analysis dashboard. 
| +| dash_db_comrol_timeout | 1800 seconds | Specifies the number of seconds after which the Commits/Rollbacks line chart is auto-refreshed on the Database Analysis dashboard and Server Analysis dashboard. | +| dash_db_connovervw_timeout | 300 seconds | Specifies the number of seconds after which the Connection Overview pie chart is auto-refreshed in the Database Analysis dashboard. | +| dash_db_eventlag_span |


7 days

| Specifies the number of days worth of data to plot on the Number of Events Lag chart for slony replication on the Database Analysis dashboard. | +| dash_db_eventlag_timeout | 1800 seconds | Specifies the number of seconds after which the Number of Events Lag line chart for slony replication is auto-refreshed on the Database Analysis dashboard. | +| dash_db_hottable_rows | 25 rows | Specifies the number of rows to show on the HOT Table Analysis table on the Database Analysis dashboard. | +| dash_db_hottable_timeout | 300 seconds | Specifies the number of seconds after which the Hot Tables table is auto-refreshed in the Database Analysis dashboard. | +| dash_db_io_span | 7 days | Specifies the number of days worth of data to plot on the Database I/O Analysis chart on the Database Analysis dashboard and I/O Analysis dashboard. | +| dash_db_io_timeout | 1800 seconds | Specifies the number of seconds after which the Database I/O line chart is auto-refreshed on the Database Analysis dashboard and I/O Analysis dashboard. | +| dash_db_rowact_span | 7 days | Specifies the number of days worth of data to plot on the Row Activity Analysis chart on the Database Analysis dashboard, the I/O Analysis dashboard, and the Server Analysis dashboard. | +| dash_db_rowact_timeout | 1800 seconds | Specifies the number of seconds after which the Row Activity line chart is auto-refreshed on the Database Analysis dashboard, the I/O Analysis dashboard, and the Server Analysis dashboard. | +| dash_db_storage_timeout | 300 seconds | Specifies the number of seconds after which the Storage bar chart is auto-refreshed in the Database Analysis dashboard. | +| dash_db_timelag_span | 7 days | Specifies the number of days worth of data to plot on the Time Lag chart for Slony replication on the Database Analysis dashboard. 
| +| dash_db_timelag_timeout | 1800 seconds | Specifies the number of seconds after which the Time Lag line chart for slony replication is auto-refreshed on the Database Analysis dashboard. | +| dash_db_useract_span | 7 days | Specifies the number of days worth of data to plot on the User Activity Analysis chart on the Database Analysis dashboard. | +| dash_db_useract_timeout | 1800 seconds | Specifies the number of seconds after which the User Activity line chart is auto-refreshed in the Database Analysis dashboard. | +| dash_efm_timeout | 300 seconds | Specifies the number of seconds after which the Failover Manager Node Status and Failover Manager Cluster Info line chart is auto-refreshed on the Streaming Replication dashboard. | +| dash_global_overview_timeout | 30 seconds | Specifies the number of seconds after which the components of the Global Overview dashboard are auto-refreshed. | +| dash_header_timeout | 60 seconds | Specifies the number of seconds after which the information on the header of all the dashboards are auto-refreshed. | +| dash_io_chkpt_span | 7 days | Specifies the number of days worth of data to plot on the Checkpoints chart on the I/O Analysis dashboard. | +| dash_io_chkpt_timeout | 1800 seconds | Specifies the number of seconds after which the Checkpoints line chart is auto-refreshed on the I/O Analysis dashboard. | +| dash_io_hotindx_timeout | 300 seconds | Specifies the number of seconds after which the Hot Indexes bar chart is auto-refreshed on the I/O Analysis dashboard. | +| dash_io_hottbl_timeout | 300 seconds | Specifies the number of seconds after which the Hot Tables bar chart is auto-refreshed on the I/O Analysis dashboard. | +| dash_io_index_objectio_rows | 25 rows | Specifies the number of rows displayed on the Index Activity table on the I/O Analysis dashboard and the Object Activity Analysis dashboard. 
| +| dash_io_index_objectio_timeout | 60 seconds | Specifies the number of seconds after which the Index Activity table is auto-refreshed on the I/O Analysis dashboard and the Object Activity Analysis dashboard. | +| dash_io_objectio_rows | 25 rows | Specifies the number of rows displayed in the Object I/O Details table on the I/O Analysis dashboard and Object Activity Analysis dashboard. | +| dash_io_objectio_timeout | 300 seconds | Specifies the number of seconds after which the Object I/O Details table is auto-refreshed on the I/O Analysis dashboard and Object Activity Analysis dashboard. | +| dash_memory_hostmemact_span | 7 days | Specifies the number of days worth of data to plot on the Host Memory Activity Analysis chart on the Memory Analysis dashboard. | +| dash_memory_hostmemact_timeout | 1800 seconds | Specifies the number of seconds after which the Host Memory Activity line chart is auto-refreshed on the Memory Analysis dashboard. | +| dash_memory_hostmemconf_timeout | 300 seconds | Specifies the number of seconds after which the Host Memory Configuration pie chart is auto-refreshed on the Memory Analysis dashboard and Server Analysis dashboard. | +| dash_memory_servmemact_span | 7 days | Specifies the number of days worth of data to plot on the server Memory Activity Analysis chart on the Memory Analysis dashboard. | +| dash_memory_servmemact_timeout | 1800 seconds | Specifies the number of seconds after which the Server Memory Activity line chart is auto-refreshed on the Memory Analysis dashboard. | +| dash_memory_servmemconf_timeout | 300 seconds | Specifies the number of seconds after which the Server Memory Configuration pie chart is auto-refreshed on the Memory Analysis dashboard. | +| dash_objectact_objstorage_rows | 15 rows | Specifies the number of rows to show on the Object Storage table on the Object Activity Analysis dashboard. 
| +| dash_objectact_objstorage_timeout | 300 seconds | Specifies the number of seconds after which the Object Storage table is auto-refreshed in the Object Activity Analysis dashboard. | +| dash_objectact_objtopindexes_timeout | 300 seconds | Specifies the number of seconds after which the Top 5 Largest Indexes bar chart is auto-refreshed in the Object Activity Analysis dashboard. | +| dash_objectact_objtoptables_timeout | 300 seconds | Specifies the number of seconds after which the Top 5 Largest Tables bar chart is auto-refreshed in the Object Activity Analysis dashboard. | +| dash_os_cpu_span | 7 days | Specifies the number of days worth of data to plot on the CPU chart on the Operating System Analysis dashboard. | +| dash_os_cpu_timeout | 1800 seconds | Specifies the number of seconds after which the CPU line chart is auto-refreshed on the Operating System Analysis dashboard. | +| dash_os_data_span | 7 days | Specifies the number of days worth of data to plot on the I/O line chart on the Operating System Analysis dashboard. | +| dash_os_disk_span | 7 days | Specifies the number of days worth of data to plot on the Utilisation chart on the Operating System Analysis dashboard. | +| dash_os_hostfs_timeout | 1800 seconds | Specifies the number of seconds after which the Host File System Details table is auto-refreshed on the Operating System Analysis dashboard. | +| dash_os_io_timeout | 1800 seconds | Specifies the number of seconds after which the I/O line chart is auto-refreshed on the Operating System Analysis dashboard. | +| dash_os_memory_span | 7 days | Specifies the number of days worth of data to plot on the Memory chart on the Operating System Analysis dashboard. | +| dash_os_memory_timeout | 1800 seconds | Specifies the number of seconds after which the Memory line chart is auto-refreshed on the Operating System Analysis dashboard. 
| +| dash_os_packet_span | 7 days | Specifies the number of days worth of data to plot on the Packet chart on the Operating System Analysis dashboard. | +| dash_os_packet_timeout | 1800 seconds | Specifies the number of seconds after which the Network Packets line chart is auto-refreshed on the Operating System Analysis dashboard. | +| dash_os_process_span | 7 days | Specifies the number of days worth of data to plot on the Process chart on the Operating System Analysis dashboard. | +| dash_os_process_timeout | 1800 seconds | Specifies the number of seconds after which the Process line chart is auto-refreshed on the Operating System Analysis dashboard. | +| dash_os_storage_timeout | 1800 seconds | Specifies the number of seconds after which the Storage pie chart is auto-refreshed on the Operating System Analysis dashboard. | +| dash_os_traffic_span | 7 days | Specifies the number of days worth of data to plot on the Traffic chart on the Operating System Analysis dashboard. | +| dash_os_traffic_timeout | 1800 seconds | Specifies the number of seconds after which the Traffic line chart is auto-refreshed on the Operating System Analysis dashboard. | +| dash_os_util_timeout | 1800 seconds | Specifies the number of seconds after which the Utilisation line chart is auto-refreshed on the Operating System Analysis dashboard. | +| dash_probe_log_timeout | 300 seconds | Specifies the number of seconds after which the Probe Log table is auto-refreshed on the Probe Log Analysis dashboard. | +| dash_replication_archivestat_span | 7 days | Specifies the number of days worth of data to plot on the WAL Archive Status chart on the Streaming Replication Analysis dashboard. | +| dash_replication_archivestat_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Archive Status line chart is auto-refreshed on the Streaming Replication dashboard. 
| +| dash_replication_pagelag_span | 7 days | Specifies the number of days worth of data to plot on the WAL Lag Pages chart on the Streaming Replication dashboard. | +| dash_replication_pagelag_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Lag Pages line chart is auto-refreshed on the Streaming Replication dashboard. | +| dash_replication_segmentlag_span | 7 days | Specifies the number of days worth of data to plot on the WAL Lag Segments chart on the Streaming Replication dashboard. | +| dash_replication_segmentlag_timeout | 1800 seconds | Specifies the number of seconds after which the WAL Lag Segments line chart is auto-refreshed on the Streaming Replication dashboard. | +| dash_replication_timelag_span | 7 days | Specifies the number of days worth of data to plot on the Replication Lag Time chart on the Streaming Replication dashboard. | +| dash_replication_timelag_timeout | 1800 seconds | Specifies the number of seconds after which the Replication Lag Time line chart is auto-refreshed on the Streaming Replication dashboard. | +| dash_server_buffers_written | 168 hours | Specifies the number of days worth of data to plot on the Background Writer Statistics chart on the Server Analysis dashboard. | +| dash_server_buffers_written_timeout | 300 seconds | Specifies the number of seconds after which the Background Writer Statistics line chart is auto-refreshed on the Server Analysis dashboard. | +| dash_server_connovervw_timeout | 300 seconds | Specifies the number of seconds after which the Connection Overview pie chart is auto-refreshed in the Server Analysis dashboard. | +| dash_server_database_timeout | 300 seconds | Specifies the number of seconds after which the Databases table is auto-refreshed in the Server Analysis dashboard. | +| dash_server_dbsize_span | 7 days | Specifies the number of days worth of data to plot on the Database Size Analysis chart on the Server Analysis dashboard. 
| +| dash_server_dbsize_timeout | 1800 seconds | Specifies the number of seconds after which the Database Size line chart is auto-refreshed in the Server Analysis dashboard. | +| dash_server_disk_timeout | 1800 seconds | Specifies the number of seconds after which the Disk line chart is auto-refreshed in the Server Analysis dashboard. | +| dash_server_global_span | 7 days | Specifies the number of days worth of data to plot on the Disk line chart on the Server Analysis dashboard. | +| dash_server_sharedbuff_span | 7 days | Specifies the number of days worth of data to plot on the Shared Buffer chart on the Server Analysis dashboard. | +| dash_server_sharedbuff_timeout | 1800 seconds | Specifies the number of seconds after which the Shared Buffers line chart is auto-refreshed in the Server Analysis dashboard. | +| dash_server_tabspacesize_span | 7 days | Specifies the number of days worth of data to plot on the Tablespace Size chart on the Server Analysis dashboard. | +| dash_server_tabspacesize_timeout | 1800 seconds | Specifies the number of seconds after which the Tablespace Size line chart is auto-refreshed in the Server Analysis dashboard. | +| dash_server_useract_span | 7 days | Specifies the number of days worth of data to plot on the User Activity chart on the Server Analysis dashboard. | +| dash_server_useract_timeout | 1800 seconds | Specifies the number of seconds after which the User Activity line chart is auto-refreshed in the Server Analysis dashboard. | +| dash_sessact_lockact_timeout | 300 seconds | Specifies the number of seconds after which the Session Lock Activity table is auto-refreshed in the Session Activity Analysis dashboard. | +| dash_sessact_workload_timeout | 300 seconds | Specifies the number of seconds after which the Session Workload table is auto-refreshed in the Session Activity Analysis dashboard. 
| +| dash_sess_waits_nowaits_timeout | 300 seconds | Specifies the number of seconds after which the Session Waits By Number Of Waits pie chart is auto-refreshed in the Session Waits Analysis dashboard. | +| dash_sess_waits_timewait_timeout | 300 seconds | Specifies the number of seconds after which the Session Waits By Time Waited pie chart is auto-refreshed in the Session Waits Analysis dashboard. | +| dash_sess_waits_waitdtl_timeout | 300 seconds | Specifies the number of seconds after which the Session Waits Details table is auto-refreshed in the Session Waits Analysis dashboard. | +| dash_storage_dbdtls_timeout | 300 seconds | Specifies the number of seconds after which the Database Details table is auto-refreshed in the Storage Analysis dashboard. | +| dash_storage_dbovervw_timeout | 300 seconds | Specifies the number of seconds after which the Database Overview pie chart is auto-refreshed in the Storage Analysis dashboard. | +| dash_storage_hostdtls_timeout | 300 seconds | Specifies the number of seconds after which the Host Details table is auto-refreshed in the Storage Analysis dashboard. | +| dash_storage_hostovervw_timeout | 300 seconds | Specifies the number of seconds after which the Host Overview pie chart is auto-refreshed in the Storage Analysis dashboard. | +| dash_storage_tblspcdtls_timeout | 300 seconds | Specifies the number of seconds after which the Tablespace Details table is auto-refreshed in the Storage Analysis dashboard. | +| dash_storage_tblspcovervw_timeout | 300 seconds | Specifies the number of seconds after which the Tablespace Overview pie chart is auto-refreshed in the Storage Analysis dashboard. | +| dash_sys_waits_nowaits_timeout | 300 seconds | Specifies the number of seconds after which the System Waits By Number Of Waits pie chart is auto-refreshed in the System Waits Analysis dashboard. 
+| dash_sys_waits_timewait_timeout | 300 seconds | Specifies the number of seconds after which the System Waits By Time Waited pie chart is auto-refreshed in the System Waits Analysis dashboard. | +| dash_sys_waits_waitdtl_timeout | 300 seconds | Specifies the number of seconds after which the System Waits Details table is auto-refreshed in the System Waits Analysis dashboard. | +| deleted_charts_retention_time | 7 days | Specifies the number of days that a custom chart (displayed on a user-defined dashboard) is stored. | +| deleted_probes_retention_time | 7 days | Specifies the number of days that a custom probe (displayed on a user-defined dashboard) is stored. | +| download_chart_format | jpeg | Specifies the format in which a downloaded chart will be stored. May be jpeg or png. | +| flapping_detection_state_change | 3 | Specifies the number of state changes detected within a specified interval to define a given alert as flapping. - Flapping starts when more than `N` state changes have occurred over \[ `N` + 1 \* (min(probe_interval) \* 2)] minutes and the final state is not None, where the default value of `N` is 2 or 3, and min(probe_interval) is the smallest interval for all the probes used by the alert.
- Flapping ends when ZERO state changes have occurred over \[2 `N` \* min(probe_interval)] minutes.
| +| job_retention_time | 30 days | Specifies the number of days that non-recurring scheduled tasks and their associated logs are retained, after their execution time. | +| long_running_transaction_minutes | 5 minutes | Specifies the number of minutes a query executes for before being considered long running. | +| nagios_cmd_file_name | <file_name> | Specifies nagios command file to which passive service check result will be sent. | +| nagios_enabled | t | Specifies whether alert notification will be submitted to nagios or not. | +| nagios_medium_alert_as_critical | f | Specifies whether medium level PEM alert will be considered as critical in nagios. | +| nagios_spool_retention_time | 7 days | Specifies the number of days to retain nagios messages in the spool table before they are discarded. | +| reminder_notification_interval | 24 hours | Specifies the number of hours after which a reminder email is sent in case an alert has not been cleared. | +| server_log_retention_time | 30 days | Specifies the number of days that the server log is retained on the PEM server. | +| show_data_tab_on_graph | false | If 'true', a Data tab is added to each graph. Select the Data tab to review the data that is plotted on the graph. | +| smtp_authentication | false | Specifies whether to enable/disable authentication over SMTP. | +| smtp_enabled | true | Specifies whether to enable/disable sending of emails. | +| smtp_encryption | false | Specifies whether to send SMTP email using an encrypted connection. | +| smtp_password | | Specifies the password to be used to connect to the SMTP server. | +| smtp_port | 25 | Specifies the SMTP server port to be used for sending email. | +| smtp_server | 127.0.0.1 | Specifies the SMTP server host address to be used for sending email. | +| smtp_spool_retention_time | 7 days | Specifies the number of days to retain sent email messages in the spool table before they are discarded. 
| +| smtp_username | | Specifies the username to be used to connect to SMTP server. | +| snmp_community | public | Specifies the SNMP community used when sending traps. Used only with SNMPv1 and SNMPv2. | +| snmp_enabled | true | Specifies whether to enable/disable sending SNMP traps. | +| snmp_port | 162 | Specifies the SNMP server port to be used for sending SNMP traps. | +| snmp_server | 127.0.0.1 | Specifies the SNMP server host address to be used for sending SNMP traps. | +| snmp_spool_retention_time | 7 days | Specifies the number of days to retain sent traps in the spool table before they are discarded. | +| snmp_security_name | | Specifies the user name or security name for sending SNMP traps. Used only with SNMPv3. | +| snmp_security_engine_id | | Specifies the Engine id of the SNMP Agent on the SNMP Server. Used only with SNMPv3. | +| snmp_security_level | NOAUTH_NOPRIV | Specifies Security level and its possible values can be: AUTH_NOPRIV - Authentication, No Privacy AUTH_PRIV - Authentication, Privacy NOAUTH_NOPRIV - no Authentication, no Privacy. Used only with SNMPv3. | +| snmp_context_name | | Specifies the Context name, the identifier for MIB objects when sending SNMP traps. Used only with SNMPv3. | +| snmp_context_engine_id | | Specifies the Context engine id, the identifier for MIB objects when sending SNMP traps. If not specified, snmp_security_engine_id will be used. Used only with SNMPv3. | +| snmp_authentication_protocol | NONE | Specifies the authentication type for SNMP traps. Its possible values can be NONE, HMACMD5 or HMACSHA. Used only with SNMPv3. | +| snmp_privacy_protocol | NONE | Specifies the privacy protocol for SNMP traps. Its possible values can be NONE, DES, AES128, IDEA, AES192, or AES256. Used only with SNMPv3. | +| snmp_authentication_password | | Specifies the authentication password associated with security name mentioned in snmp_security_name. Used only for SNMPv3. 
| +| snmp_privacy_password | | Specifies the privacy password associated with security name mentioned in snmp_security_name. Used only for SNMPv3. | +| webclient_help_pg | EnterpriseDB hosted documentation | Specifies the location of the online PostgreSQL core documentation. | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/index.mdx index b6c39279845..6ceebe4453c 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/02_pem_server_config/index.mdx @@ -4,7 +4,6 @@ title: "Server Configuration" - You can use the `Server Configuration` dialogue to modify values of user-configurable parameters that control the behavior of Postgres Enterprise Manager. To access the `Server Configuration` dialog, connect to the PEM server, and select `Server Configuration...` from the `File` menu. ![Server Configuration dialogue](../../images/pem_server_config.png) @@ -13,12 +12,12 @@ Enter a parameter name in the search box in the upper-right corner of the dialog To modify a parameter value, edit the content displayed in the `Value` field to the right of a parameter name. Click the `Save` icon in the upper-right corner of the dialog to save your changes, or click the `Close` button to exit the dialog without applying the changes. -A list of configuration options may be found [here](../04_toc_pem_features/02_pem_server_config/#pem_config_options). +A list of configuration options may be found [here](01_pem_config_options/#pem_config_options). Contents:
-pem\_config\_options +pem_config_options
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/03_log_manager.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/03_log_manager.mdx index 09cd7eac33d..aab5e443166 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/03_log_manager.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/03_log_manager.mdx @@ -4,7 +4,6 @@ title: "Log Manager" - Use the Log Manager wizard to specify logging preferences for a Postgres database server. Log Manager supports Advanced Server and PostgreSQL versions 9.0 (and later). The Log Manager wizard assists in modifying configuration parameters that control: - Where log files are written. @@ -91,7 +90,7 @@ Click `Next` to continue: Use the fields on the `When to Log` dialog to specify which events will initiate a log file entry. The severity levels (in order of severity, from most severe to least severe) are: | | | -|-----------------------|------------------------------------------------------------------------------------| +| --------------------- | ---------------------------------------------------------------------------------- | | Severity | Description | | panic | Errors that cause all database sessions to abort. | | fatal | Errors that cause a session to abort. | @@ -147,7 +146,26 @@ Use the switches in the `General options` box to instruct the server to include You can include: -
EscapeInformationSession/Helper

%a

Application NameSession

%u

User NameSession

%d

Database NameSession

%r

Remote host name or IP address, and remote portSession

%h

Remote host name or IP addressSession

%p

Process IDHelper

%t

Time stamp without millisecondsHelper

%m

Time stamp with millisecondsHelper
%iCommand tag: type of statement that generated the log entrySession

%e

SQLSTATE error codeHelper

%c

Session identifierHelper

%l

Line number of the log entryHelper

%s

Process start time stampHelper

%v

Virtual transaction ID (backendID/localXID)Helper

%x

Transaction ID (0 if not assigned)Helper
%qProduces no output, but instructs non-session processes to stop at this point in the string; will be ignored by session processesHelper

%%

Literal %Helper
+| | | | +| -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| Escape | Information | Session/Helper | +|


%a

| Application Name | Session | +|


%u

| User Name | Session | +|


%d

| Database Name | Session | +|


%r

| Remote host name or IP address, and remote port | Session | +|


%h

| Remote host name or IP address | Session | +|


%p

| Process ID | Helper | +|


%t

| Time stamp without milliseconds | Helper | +|


%m

| Time stamp with milliseconds | Helper | +| %i | Command tag: type of statement that generated the log entry | Session | +|


%e

| SQLSTATE error code | Helper | +|


%c

| Session identifier | Helper | +|


%l

| Line number of the log entry | Helper | +|


%s

| Process start time stamp | Helper | +|


%v

| Virtual transaction ID (backendID/localXID) | Helper | +|


%x

| Transaction ID (`0` if not assigned) | Helper | +| %q | Produces no output, but instructs non-session processes to stop at this point in the string; will be ignored by session processes | Helper | +|


%%

| Literal % | Helper | - Use the `Statements` drop-down list box to specify which SQL statements will be included in the server log. The default is `none`; valid options are: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/04_audit_manager.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/04_audit_manager.mdx index 8665d9d8766..d076c4bec47 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/04_audit_manager.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/04_audit_manager.mdx @@ -4,8 +4,7 @@ title: "Audit Manager" - -You can use the PEM Audit manager to configure, enable, and disable audit logging of EDB Postgres Advanced Server instances. The Audit manager also enables audit log collection, allowing you to view log data on the [Audit Log Dashboard](../04_toc_pem_features/01_dashboards/#audit_log_dashboard). +You can use the PEM Audit manager to configure, enable, and disable audit logging of EDB Postgres Advanced Server instances. The Audit manager also enables audit log collection, allowing you to view log data on the [Audit Log Dashboard](01_dashboards/02_audit_log_dashboard/#audit_log_dashboard). To run the Audit manager wizard, select `Audit manager...` from the PEM client `Management` menu. Audit manager opens, displaying the `Welcome` dialog: @@ -15,7 +14,7 @@ Click `Next` to continue: ![Audit Manager Wizard - Select Servers page](../images/audit_manager_servers.png) -Use the `Select servers` tree control to specify the servers to which the auditing configuration will be applied. To make a server available in the tree control, you must provide the `Service ID` on the PEM [Server](../01_toc_pem_getting_started/#pem_define_connection) dialog. Note that only EDB Postgres Advanced Server supports auditing; PostgreSQL servers will not be included in the tree control. 
+Use the `Select servers` tree control to specify the servers to which the auditing configuration will be applied. To make a server available in the tree control, you must provide the `Service ID` on the PEM [Server](../01_toc_pem_getting_started/07_pem_define_connection/#pem_define_connection) dialog. Note that only EDB Postgres Advanced Server supports auditing; PostgreSQL servers will not be included in the tree control. Click `Next` to continue: @@ -73,4 +72,4 @@ Use the `Schedule auditing changes` dialog to specify when the new configuration Click the `Finish` button to schedule a job to apply the configuration to each server. The job will consist of two tasks. One task will update the audit logging configuration on the server, and one task will restart the server with the new configuration. -The scheduled jobs can be viewed in the [Task Viewer](../04_toc_pem_features/18_pem_task_view/#pem_task_view), and the results in the [Log Viewer](../04_toc_pem_features/18_pem_task_view/#pem_log_view) when opened from the appropriate server or agent. +The scheduled jobs can be viewed in the [Task Viewer](18_pem_task_view/#pem_task_view), and the results in the [Log Viewer](18_pem_task_view/01_pem_log_view/#pem_log_view) when opened from the appropriate server or agent. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/05_pem_log_analysis_expert.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/05_pem_log_analysis_expert.mdx index 90f463d1d62..4dc4ca852d7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/05_pem_log_analysis_expert.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/05_pem_log_analysis_expert.mdx @@ -4,7 +4,6 @@ title: "Postgres Log Analysis Expert" - The Postgres Log Analysis Expert analyzes the log files of servers that are registered with PEM, and produces a report that provides an overview of your Postgres cluster's usage based on log file entries. 
You can use information on the Log Analysis Expert reports to make decisions about optimizing your cluster usage and configuration to improve performance. Before invoking the Postgres Log Analysis Expert, you must specify the `Service ID` on the `Advanced` tab of the server's properties dialog, and use the Log Manager wizard to enable log collection by the PEM server. To invoke the Log Manager wizard, select the `Log Manager...` option from the `Management` menu; check the box next to `Import logs to PEM` in the `Import Logs` panel of the wizard to enable log collection. @@ -74,7 +73,7 @@ The report may include one or more of the following: - The `Average sessions duration` row displays the average length of each session. - The `Total number of connections` row displays the number of user connections made to the server. - The `Total number of databases` row displays the number of databases on the selected server. -- The `Hourly DML Statistics` table displays the statistics related to the use of various DML commands (SELECT, INSERT, UPDATE, DELETE, COPY and FETCH) within a one-hour period. To generate values in the `Min Duration(sec)`, `Max Duration(sec)`, and `Avg Duration(sec)` columns of this table, you must specify a value greater than or equal to `0` in the log\_min\_duration\_statement configuration parameter. You can set the parameter by either modifying the `postgresql.conf` file with your editor of choice, or by specifying a value of `0` or greater in the `Log Min Duration Statement` field of the `Log Manager` wizard. +- The `Hourly DML Statistics` table displays the statistics related to the use of various DML commands (SELECT, INSERT, UPDATE, DELETE, COPY and FETCH) within a one-hour period. To generate values in the `Min Duration(sec)`, `Max Duration(sec)`, and `Avg Duration(sec)` columns of this table, you must specify a value greater than or equal to `0` in the log_min_duration_statement configuration parameter. 
You can set the parameter by either modifying the `postgresql.conf` file with your editor of choice, or by specifying a value of `0` or greater in the `Log Min Duration Statement` field of the `Log Manager` wizard. - The `Time` column displays the start of the one-hour period for which data was analyzed. - The `Database` column displays the name of the database in which the specified DML command executed. - The `Command Type` column displays the DML command type. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/06_tuning_wizard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/06_tuning_wizard.mdx index daa84263c43..ee9cbc84514 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/06_tuning_wizard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/06_tuning_wizard.mdx @@ -4,10 +4,9 @@ title: "Tuning Wizard" - The Tuning Wizard reviews your PostgreSQL or Advanced Server installation, and recommends a set of configuration options that will help tune the installation to best suit its anticipated workload. Please note that benchmarking systems or systems with a high work load may require additional manual tuning to reach optimum performance. -Before using the Tuning Wizard, you must specify the name of the service in the `Service ID` field on the `Advanced` tab of the server's `` `Properties `` <pem\_define\_connection>\` dialog. PEM will use the service name when restarting the service after tuning. +Before using the Tuning Wizard, you must specify the name of the service in the `Service ID` field on the `Advanced` tab of the server's `` `Properties `` <pem_define_connection>\` dialog. PEM will use the service name when restarting the service after tuning. The Tuning Wizard can only make recommendations for those servers that reside on the same server as their bound PEM agent. 
If you have specified a value of `Yes` in the `Remote monitoring` field when defining your server, the server will not be displayed in the Tuning Wizard tree control. @@ -83,8 +82,8 @@ You can confirm that Tuning Wizard has implemented the recommended changes by re ![Tuning Wizard Change Confirmation](../images/tuning_wiz_confirm_chg.png) -You can also confirm a parameter value by querying the server. For example, to confirm the value of the shared\_buffers parameter, open a SQL command line using either the `Query Tool` (accessed through the `Tools` menu) or the psql client, and issue the command: +You can also confirm a parameter value by querying the server. For example, to confirm the value of the shared_buffers parameter, open a SQL command line using either the `Query Tool` (accessed through the `Tools` menu) or the psql client, and issue the command: -*SHOW shared\_buffers;* +*SHOW shared_buffers;* The value returned by the server will confirm that the parameter has been modified. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/01_pe_schema_config_expert_recommendations.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/01_pe_schema_config_expert_recommendations.mdx index aba6c9f01f3..02ec09559ec 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/01_pe_schema_config_expert_recommendations.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/01_pe_schema_config_expert_recommendations.mdx @@ -4,9 +4,8 @@ title: "Schema Expert Recommendations" - | | | -|----------------|---------------------------------------------------------------| +| -------------- | ------------------------------------------------------------- | | Rule | Check for missing primary keys | | Recommendation | Ensure tables have a primary key | | Trigger | Postgres Expert detected a table with no defined primary key. | @@ -15,7 +14,7 @@ title: "Schema Expert Recommendations" **Description:** Primary keys are used to define the set of columns that make up the unique key to each row in the table. Whilst they are similar to unique indexes, primary keys cannot contain NULL values, thus are always able to identify a single row. Tools such as Postgres Enterprise Manager and other pieces of software such as ORM will automatically detect primary keys on tables and use their definition to identify individual rows. | | | -|----------------|--------------------------------------------------------------------------------| +| -------------- | ------------------------------------------------------------------------------ | | Rule | Check for missing foreign key indexes | | Recommendation | Ensure columns of child tables in foreign key relationships are indexed. | | Trigger | Postgres Expert detected a child table with no index on referencing column(s). 
| @@ -23,17 +22,17 @@ title: "Schema Expert Recommendations" **Description:** Foreign keys are used to define and enforce relationships between child and parent tables. The foreign key specifies that values in one or more columns of the child table must exist (in the same combination, if more than one column) in the referenced column(s) of the parent table. A unique index is required to be present on the referenced columns in the parent table, however an index is not required, but is generally advisable, on the referencing columns of the child table to allow cascading updates to the parent to be executed efficiently. -| | | -|----------------|--------------------------------------------| -| Rule | Check Database Encoding | -| Recommendation | Avoid encoding as SQL\_ASCII for databases | -| Trigger | encoding = SQL\_ASCII | -| Severity | Medium | +| | | +| -------------- | ----------------------------------------- | +| Rule | Check Database Encoding | +| Recommendation | Avoid encoding as SQL_ASCII for databases | +| Trigger | encoding = SQL_ASCII | +| Severity | Medium | -**Description:** The database is created to store data using the SQL\_ASCII encoding. This encoding is defined for 7 bit characters only; the meaning of characters with the 8th bit set (non-ASCII characters 127-255) is not defined. Consequently, it is not possible for the server to convert the data to other encodings. If you're storing non-ASCII data in the database, you're strongly encouraged to use a proper database encoding representing your locale character set to take benefit from the automatic conversion to different client encodings when needed. If you store non-ASCII data in an SQL\_ASCII database, strange characters may be written to or read from the database, caused by code conversion problems. This may cause problems when accessing the database using different client programs and drivers. For most installations, Unicode (UTF8) encoding will provide the most versatility. 
+**Description:** The database is created to store data using the SQL_ASCII encoding. This encoding is defined for 7 bit characters only; the meaning of characters with the 8th bit set (non-ASCII characters 127-255) is not defined. Consequently, it is not possible for the server to convert the data to other encodings. If you're storing non-ASCII data in the database, you're strongly encouraged to use a proper database encoding representing your locale character set to take benefit from the automatic conversion to different client encodings when needed. If you store non-ASCII data in an SQL_ASCII database, strange characters may be written to or read from the database, caused by code conversion problems. This may cause problems when accessing the database using different client programs and drivers. For most installations, Unicode (UTF8) encoding will provide the most versatility. | | | -|----------------|---------------------------------------------------------------------| +| -------------- | ------------------------------------------------------------------- | | Rule | Check for too many indexes | | Recommendation | Don't overload a table with too many indexes. | | Trigger | Postgres Expert has detected that a table has more than 10 indexes. | @@ -42,7 +41,7 @@ title: "Schema Expert Recommendations" **Description:** Whilst indexes can speed up SELECT queries by allowing Postgres to quickly locate records, it is important to choose which indexes are required carefully to ensure they are used. Maintaining an index has a cost, and the more indexes there are to update, the slower INSERT, UPDATE or DELETE queries can become. There are no hard and fast rules to tell you how many indexes are required on a particular table -the DBA must balance the need for indexes for different types of SELECT queries and constraints against the cost of maintaining them. 
| | | -|--------------------|--------------------------------------------------------------------------------------------------| +| ------------------ | ------------------------------------------------------------------------------------------------ | | Configuration Item | Check data and transaction log on same drive | | Recommendation | Avoid using the same storage device for the data directory and transaction logs. | | Trigger | Postgres Expert has detected that a data directory and transaction log directory share a device. | @@ -50,17 +49,17 @@ title: "Schema Expert Recommendations" **Description:** Postgres' performance can be adversely affected on medium to heavily loaded systems if both the data and the transaction logs (WAL) are stored on the same device. It is considered good practice to store them on separate physical devices if performance is an issue. On busy servers, significant performance gains may be seen when separating the data directory and transaction log directory onto different physical storage devices. -| | | -|----------------|---------------------------------------------------------------------------------------------------------------------| -| Rule | Check tablespace and transaction log on same drive | -| Recommendation | Avoid using the same storage device for the transaction logs and a tablespace. | -| Trigger | Postgres Expert has detected that transaction log directory and a tablespace other than pg\_default share a device. | -| Severity | Medium | +| | | +| -------------- | ------------------------------------------------------------------------------------------------------------------ | +| Rule | Check tablespace and transaction log on same drive | +| Recommendation | Avoid using the same storage device for the transaction logs and a tablespace. | +| Trigger | Postgres Expert has detected that transaction log directory and a tablespace other than pg_default share a device. 
| +| Severity | Medium | **Description:** Before updating database files to reflect data modifications, the server writes the change to the transaction log. The database files may be separated onto different devices using tablespaces (defined storage areas used by the database server). On busy servers, significant performance gains may be seen when separating tablespace directories and the transaction log directory onto different physical storage devices. | | | -|----------------|------------------------------------------------------------------------| +| -------------- | ---------------------------------------------------------------------- | | Rule | Check multiple tablespace on same drive | | Recommendation | Avoid using the same storage device for multiple tablespaces. | | Trigger | Postgres Expert has detected that multiple tablespaces share a device. | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/02_pe_security_expert_recommendations.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/02_pe_security_expert_recommendations.mdx index bf175c4928d..6dc59213466 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/02_pe_security_expert_recommendations.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/02_pe_security_expert_recommendations.mdx @@ -4,31 +4,30 @@ title: "Security Expert Recommendations" - -| | | -|----------------|---------------------------------------------------------------------| -| Rule | Check SSL for improved performance | -| Recommendation | Consider disabling SSL for improved performance. 
| -| Trigger | ssl = on and listen\_addresses in ('localhost', '127.0.0.1', '::1') | -| Severity | Low | +| | | +| -------------- | ------------------------------------------------------------------ | +| Rule | Check SSL for improved performance | +| Recommendation | Consider disabling SSL for improved performance. | +| Trigger | ssl = on and listen_addresses in ('localhost', '127.0.0.1', '::1') | +| Severity | Low | **Description:** SSL authentication is invaluable for protecting against connection-spoofing and eavesdropping attacks, but it is not always necessary for adequate security. When PostgreSQL accepts only local connections, or when it accepts only connections from a trusted network where malicious network traffic is not a concern, SSL encryption may not be necessary. Consider changing this setting if the current value is not appropriate for your environment. Note: Even when SSL encryption is enabled, PostgreSQL servers should be further protected using an appropriate firewall configuration. -| | | -|----------------|-------------------------------------------------------------------------| -| Rule | Check SSL for improved connection security | -| Recommendation | Consider using SSL for improved connection security. | -| Trigger | ssl = off and listen\_addresses not in ('localhost', '127.0.0.1','::1') | -| Severity | Medium | +| | | +| -------------- | ---------------------------------------------------------------------- | +| Rule | Check SSL for improved connection security | +| Recommendation | Consider using SSL for improved connection security. | +| Trigger | ssl = off and listen_addresses not in ('localhost', '127.0.0.1','::1') | +| Severity | Medium | -**Description:** The configuration variable listen\_addresses indicates that your system may accept non-local connection requests, but SSL is not enabled. If PostgreSQL is exposed only to a secure, trusted internal network, this configuration is appropriate for maximum performance. 
Otherwise, you should consider enabling SSL. SSL offers two main advantages. First, it provides a more secure mechanism for authorizing connections to the database, helping to prevent unauthorized access. Second, SSL prevents eavesdropping attacks, where data sent from the database to clients, or from clients to the database, is viewed by an attacker while in transit. Consider changing this setting if the current value is not appropriate for your environment. +**Description:** The configuration variable listen_addresses indicates that your system may accept non-local connection requests, but SSL is not enabled. If PostgreSQL is exposed only to a secure, trusted internal network, this configuration is appropriate for maximum performance. Otherwise, you should consider enabling SSL. SSL offers two main advantages. First, it provides a more secure mechanism for authorizing connections to the database, helping to prevent unauthorized access. Second, SSL prevents eavesdropping attacks, where data sent from the database to clients, or from clients to the database, is viewed by an attacker while in transit. Consider changing this setting if the current value is not appropriate for your environment. Note: Even when SSL encryption is enabled, PostgreSQL servers should be further protected using an appropriate firewall configuration. | | | -|----------------|-------------------------------------------------------------------------------| +| -------------- | ----------------------------------------------------------------------------- | | Rule | Check TRUST authentication is disabled | | Recommendation | Avoid trust and ident authentication on unsecured networks. 
| | Trigger | trust or ident authentication allowed to any host other than 127.0.0.1 or ::1 | @@ -36,20 +35,20 @@ Note: Even when SSL encryption is enabled, PostgreSQL servers should be further **Description:** An attacker with access to your network can easily use the trust and ident authentication methods to subvert your network. If PostgreSQL is not running on a secure network, with firewalls in place to prevent malicious traffic, the use of these authentication methods should be avoided. -| | | -|----------------|---------------------------------------------------------------------------------------| -| Rule | Check Password authentication on unsecured networks | -| Recommendation | Avoid password authentication on unsecured networks. | -| Trigger | (connection\_type = 'host' or connection\_type = 'hostnossl') and method = 'password' | -| Severity | High | +| | | +| -------------- | ----------------------------------------------------------------------------------- | +| Rule | Check Password authentication on unsecured networks | +| Recommendation | Avoid password authentication on unsecured networks. | +| Trigger | (connection_type = 'host' or connection_type = 'hostnossl') and method = 'password' | +| Severity | High | -**Description:** Passwords should not be transmitted in plaintext over unsecured networks. The use of md5 authentication provides slightly better security, but can still allow accounts to be compromised by a determined attacker. SSL encryption is a superior alternative. To require the use of SSL, set the connection type to hostssl in the pg\_hba.conf file. +**Description:** Passwords should not be transmitted in plaintext over unsecured networks. The use of md5 authentication provides slightly better security, but can still allow accounts to be compromised by a determined attacker. SSL encryption is a superior alternative. To require the use of SSL, set the connection type to hostssl in the pg_hba.conf file. 
-| | | -|----------------|-------------------------------------------------------------------| -| Rule | Check SSL for increased security | -| Recommendation | Consider requiring SSL. | -| Trigger | ssl = on in postgresql.conf, but no hostssl lines in pg\_hba.conf | -| Severity | Medium | +| | | +| -------------- | ---------------------------------------------------------------- | +| Rule | Check SSL for increased security | +| Recommendation | Consider requiring SSL. | +| Trigger | ssl = on in postgresql.conf, but no hostssl lines in pg_hba.conf | +| Severity | Medium | -**Description:** SSL encrypts passwords and all data transmitted over the connection, providing increased security. To require the use of SSL, set the connection type to hostssl in the pg\_hba.conf file. +**Description:** SSL encrypts passwords and all data transmitted over the connection, providing increased security. To require the use of SSL, set the connection type to hostssl in the pg_hba.conf file. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/03_pe_configuration_expert_recommendations.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/03_pe_configuration_expert_recommendations.mdx index c6bf7e0f21a..94961935f41 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/03_pe_configuration_expert_recommendations.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/03_pe_configuration_expert_recommendations.mdx @@ -4,61 +4,60 @@ title: "Configuration Expert Recommendations" +| | | +| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Rule | Check shared_buffers | +| Recommendation | Consider adjusting shared_buffers | +| Trigger | shared_buffers < (OS == Windows ? 
64MB : MIN(0.20 \* (system_memory - 256MB), 6GB)) or shared_buffers > (OS == Windows ? 512MB : MAX(0.35 \* system_memory, 8GB)) | +| Recommended Value | system_memory < 1GB ? MAX((system_memory - 256MB) / (OS == Windows ? 6 : 3), 64MB), OS == Windows ? MAX(system_memory / 8, 256MB) : MAX(system_memory / 4, 8GB) | +| Severity | Medium | -| | | -|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Rule | Check shared\_buffers | -| Recommendation | Consider adjusting shared\_buffers | -| Trigger | shared\_buffers < (OS == Windows ? 64MB : MIN(0.20 \* (system\_memory - 256MB), 6GB)) or shared\_buffers > (OS == Windows ? 512MB : MAX(0.35 \* system\_memory, 8GB)) | -| Recommended Value | system\_memory < 1GB ? MAX((system\_memory - 256MB) / (OS == Windows ? 6 : 3), 64MB), OS == Windows ? MAX(system\_memory / 8, 256MB) : MAX(system\_memory / 4, 8GB) | -| Severity | Medium | +**Description:** The configuration variable shared_buffers controls the amount of memory reserved by PostgreSQL for its internal buffer cache. Setting this value too low may result in "thrashing" the buffer cache, resulting in excessive disk activity and degraded performance. However, setting it too high may also cause performance problems. PostgreSQL relies on operating system caching to a significant degree , and setting this value too high may result in excessive "double buffering" that can degrade performance. It also increases the internal costs of managing the buffer pool. On UNIX-like systems, a good starting value is approximately 25% of system memory, but not more than 8GB. On Windows systems, values between 64MB and 512MB typically perform best. The optimal value is workload-dependent, so it may be worthwhile to try several different values and benchmark your system to determine which one delivers best performance. 
-**Description:** The configuration variable shared\_buffers controls the amount of memory reserved by PostgreSQL for its internal buffer cache. Setting this value too low may result in "thrashing" the buffer cache, resulting in excessive disk activity and degraded performance. However, setting it too high may also cause performance problems. PostgreSQL relies on operating system caching to a significant degree , and setting this value too high may result in excessive "double buffering" that can degrade performance. It also increases the internal costs of managing the buffer pool. On UNIX-like systems, a good starting value is approximately 25% of system memory, but not more than 8GB. On Windows systems, values between 64MB and 512MB typically perform best. The optimal value is workload-dependent, so it may be worthwhile to try several different values and benchmark your system to determine which one delivers best performance.
+Note: PostgreSQL will fail to start if the necessary amount of shared_memory cannot be located. This is usually due to an operating system limitation which can be raised by changing a system configuration setting, often called shmall. See the documentation for more details. You must set this limit to a value somewhat higher than the amount of memory required for shared_buffers, because PostgreSQL's shared memory allocation also includes amounts required for other purposes.

-Note: PostgreSQL will fail to start if the necessary amount of shared\_memory cannot be located. This is usually due to an operating system limitation which can be raised by changing a system configuration setting, often called shmall.See the documentation for more details. You must set this limit to a value somewhat higher than the amount of memory required for shared\_buffers,because PostgreSQL's shared memory allocation also includes amounts required for other purposes.
+| | | +| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Rule | Check work_mem | +| Recommendation | Consider adjusting work_mem | +| Trigger | given spare_mem = system_memory - (OS == Windows ? 256MB : MAX(0.25 \* system_memory, 8GB)) then work_mem < MAX(1MB, spare_mem / 512) or work_mem > (spare_mem / 128) | +| Recommended Value | given spare_mem defined as on the previous line then MAX (1MB, spare_mem / 256) | +| Severity | Medium | -| | | -|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Rule | Check work\_mem | -| Recommendation | Consider adjusting work\_mem | -| Trigger | given spare\_mem = system\_memory - (OS == Windows ? 256MB : MAX(0.25 \* system\_memory, 8GB)) then work\_mem < MAX(1MB, spare\_mem / 512) or work\_mem > (spare\_mem / 128) | -| Recommended Value | given spare\_mem defined as on the previous line then MAX (1MB, spare\_mem / 256) | -| Severity | Medium | +**Description:** The configuration variable work_mem controls the amount of memory PostgreSQL will use for each individual hash or sort operation. When a sort would use more than this amount of memory, the planner will arrange to perform an external sort using disk files. While this algorithm is memory efficient, it is much slower than an in-memory quick sort. Similarly, when a hash join would use more than this amount of memory, the planner will arrange to perform it in multiple batches, which saves memory but is likewise much slower. In either case, the planner may in the alternative choose some other plan that does not require the sort or hash operation, but this too is often less efficient. 
Therefore, for good performance it is important to set this parameter high enough to allow the planner to choose good plans. However, each concurrently executing query can potentially involve several sorts or hashes, and the number of queries on the system can vary greatly Therefore, a value for this setting that works well when the system is lightly loaded may result in swapping when the system becomes more heavily loaded. Swapping has very negative effects on database performance and should be avoided, so it is usually wise to set this value somewhat conservatively. -**Description:** The configuration variable work\_mem controls the amount of memory PostgreSQL will use for each individual hash or sort operation. When a sort would use more than this amount of memory, the planner will arrange to perform an external sort using disk files. While this algorithm is memory efficient, it is much slower than an in-memory quick sort. Similarly, when a hash join would use more than this amount of memory, the planner will arrange to perform it in multiple batches, which saves memory but is likewise much slower. In either case, the planner may in the alternative choose some other plan that does not require the sort or hash operation, but this too is often less efficient. Therefore, for good performance it is important to set this parameter high enough to allow the planner to choose good plans. However, each concurrently executing query can potentially involve several sorts or hashes, and the number of queries on the system can vary greatly Therefore, a value for this setting that works well when the system is lightly loaded may result in swapping when the system becomes more heavily loaded. Swapping has very negative effects on database performance and should be avoided, so it is usually wise to set this value somewhat conservatively. 
- -Note: work\_mem can be adjusted for particular databases, users, or user-and -database combinations by using the commands ALTER ROLE and ALTER DATABASE It can also be changed for a single session using the SET command. This can be helpful when particular queries can be shown to run much faster with a value of work\_mem that is too high to be applied to the system as a whole. +Note: work_mem can be adjusted for particular databases, users, or user-and -database combinations by using the commands ALTER ROLE and ALTER DATABASE It can also be changed for a single session using the SET command. This can be helpful when particular queries can be shown to run much faster with a value of work_mem that is too high to be applied to the system as a whole. | | | -|----------------|------------------------------------| -| Rule | Check max\_connections | +| -------------- | ---------------------------------- | +| Rule | Check max_connections | | Recommendation | Consider using a connection pooler | -| Trigger | max\_connections > 100 | +| Trigger | max_connections > 100 | | Severity | Medium | -**Description:** The configuration variable max\_connection is set to a value greater than 100. PostgreSQL performs best when the number of simultaneous connections is low. Peak throughput is typically achieved when the connection count is limited to is limited to approximately twice the number of system CPU cores plus the number of spindles available for disk I/O (in the case of an SSD or other non-rotating media, some experimentation may be needed to determine the "effective spindle count"). Installing a connection pooler, such as pgpool-II or pgbouncer, can allow many clients to be multiplexed onto a smaller number of server connections ,sometimes resulting in dramatic performance gains. +**Description:** The configuration variable max_connection is set to a value greater than 100. PostgreSQL performs best when the number of simultaneous connections is low. 
Peak throughput is typically achieved when the connection count is limited to approximately twice the number of system CPU cores plus the number of spindles available for disk I/O (in the case of an SSD or other non-rotating media, some experimentation may be needed to determine the "effective spindle count"). Installing a connection pooler, such as pgpool-II or pgbouncer, can allow many clients to be multiplexed onto a smaller number of server connections, sometimes resulting in dramatic performance gains.

-| | |
-|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Rule | Check maintenance\_work\_mem |
-| Recommendation | Consider adjusting maintenance\_work\_mem |
-| Trigger | spare\_mem = system\_memory - (OS == Windows ? 256MB : MAX(0.25 \* system\_memory, 8GB)) then maintenance\_work\_mem < MAX(16MB, spare\_mem / 32) or maintenance\_work\_mem > MIN(spare\_mem / 8, 256MB) |
-| Recommended Value | spare\_mem as defined on the previous line then MIN(spare\_mem/16, 256MB) |
-| Severity | Low |
+| | |
+| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Rule | Check maintenance_work_mem |
+| Recommendation | Consider adjusting maintenance_work_mem |
+| Trigger | spare_mem = system_memory - (OS == Windows ? 
256MB : MAX(0.25 \* system_memory, 8GB)) then maintenance_work_mem < MAX(16MB, spare_mem / 32) or maintenance_work_mem > MIN(spare_mem / 8, 256MB) | +| Recommended Value | spare_mem as defined on the previous line then MIN(spare_mem/16, 256MB) | +| Severity | Low | -**Description:** The configuration variable maintenance\_work\_mem controls the amount of memory PostgreSQL will use for maintenance operations such as CREATE INDEX and VACUUM. Increasing this setting from the default of 16MB to 256MB can make these operations run much faster. Higher settings typically do not produce a significant further improvement. On PostgreSQL 8.3 and higher, multiple autovacuum processes may be running at one time (up to autovacuum\_max\_workers, which defaults to 3), and each such process will use the amount of dedicated memory dictated by this parameter. This should be kept in mind when setting this parameter, especially on systems with relatively modest amounts of physical memory, so as to avoid swapping. Swapping has very negative effects on database performance and should be avoided. If the value recommended above is less than 256MB, it is chosen with this consideration in mind. However, the optimal value is workload-dependent, so it may be worthwhile to experiment with higher or lower settings. +**Description:** The configuration variable maintenance_work_mem controls the amount of memory PostgreSQL will use for maintenance operations such as CREATE INDEX and VACUUM. Increasing this setting from the default of 16MB to 256MB can make these operations run much faster. Higher settings typically do not produce a significant further improvement. On PostgreSQL 8.3 and higher, multiple autovacuum processes may be running at one time (up to autovacuum_max_workers, which defaults to 3), and each such process will use the amount of dedicated memory dictated by this parameter. 
This should be kept in mind when setting this parameter, especially on systems with relatively modest amounts of physical memory, so as to avoid swapping. Swapping has very negative effects on database performance and should be avoided. If the value recommended above is less than 256MB, it is chosen with this consideration in mind. However, the optimal value is workload-dependent, so it may be worthwhile to experiment with higher or lower settings. -| | | -|----------------|-----------------------------------------------| -| Rule | Check effective\_io\_concurrency | -| Recommendation | Consider adjusting effective\_io\_concurrency | -| Trigger | effective\_io\_concurrency < 2 | -| Severity | Low | +| | | +| -------------- | ------------------------------------------- | +| Rule | Check effective_io_concurrency | +| Recommendation | Consider adjusting effective_io_concurrency | +| Trigger | effective_io_concurrency < 2 | +| Severity | Low | -**Description:** If the PostgreSQL data files are located on a RAID array or SSD, effective\_io\_concurrency should be set to the approximate number of I/O requests that the system can service simultaneously. For RAID arrays, this is typically equal to the number of drives in the array. For SSDs, some experimentation may be needed to determine the most effective value. Setting this parameter to an appropriate value impoves the performance of bitmap index scans. The default value of 1 is appropriate for cases where all PostgreSQL data files are located on a single spinning medium. +**Description:** If the PostgreSQL data files are located on a RAID array or SSD, effective_io_concurrency should be set to the approximate number of I/O requests that the system can service simultaneously. For RAID arrays, this is typically equal to the number of drives in the array. For SSDs, some experimentation may be needed to determine the most effective value. 
Setting this parameter to an appropriate value improves the performance of bitmap index scans. The default value of 1 is appropriate for cases where all PostgreSQL data files are located on a single spinning medium.

| | |
-|----------------|----------------------------------|
+| -------------- | -------------------------------- |
| Rule | Check fsync is enabled |
| Recommendation | Consider configuring fsync = on. |
| Trigger | fsync = off |
@@ -66,110 +65,110 @@ Note: work\_mem can be adjusted for particular databases, users, or user-and -da

**Description:** When fsync is set to off, a system crash can result in unrecoverable data loss or non-obvious corruption. fsync = off is an appropriate setting only if you are prepared to erase and recreate all of your databases in the event of a system crash or unexpected power outage.

-Note: Much of the performance benefit obtained by configuring fsync = off can also be obtained by configuring synchronous\_commit = off. However, the latter settings is far safer: in the event of a crash, the last few transactions committed might be lost if they have not yet made it to disk, but the database will not be corrupted.
+Note: Much of the performance benefit obtained by configuring fsync = off can also be obtained by configuring synchronous_commit = off. However, the latter setting is far safer: in the event of a crash, the last few transactions committed might be lost if they have not yet made it to disk, but the database will not be corrupted.

-| | |
-|----------------|--------------------------------------------------------------------------------------------------------|
-| Rule | Check wal\_sync\_method |
-| Recommendation | On Windows, consider configuring wal\_sync\_method = fsync or wal\_sync\_method = fsync\_writethrough. 
| -| Trigger | OS = Windows and wal\_sync\_method not in ('fsync', 'fsync\_writethrough') | -| Severity | High | +| | | +| -------------- | ------------------------------------------------------------------------------------------------- | +| Rule | Check wal_sync_method | +| Recommendation | On Windows, consider configuring wal_sync_method = fsync or wal_sync_method = fsync_writethrough. | +| Trigger | OS = Windows and wal_sync_method not in ('fsync', 'fsync_writethrough') | +| Severity | High | -**Description:** In order to guarantee reliable crash recovery, PostgreSQL must ensure that the operating system flushes the write-ahead log to disk when asked to do so. On Windows, this can be achieved by setting wal\_sync\_method to fsync or fsync\_writethrough, or by disabling the disk cache on the drive where the write-ahead log is written. (It is safe to leave the disk cache enable if a battery-back disk cache is in use.) +**Description:** In order to guarantee reliable crash recovery, PostgreSQL must ensure that the operating system flushes the write-ahead log to disk when asked to do so. On Windows, this can be achieved by setting wal_sync_method to fsync or fsync_writethrough, or by disabling the disk cache on the drive where the write-ahead log is written. (It is safe to leave the disk cache enable if a battery-back disk cache is in use.) -Note: In cases where the loss of a very recently committed transaction is acceptable, the performance impact of flushing the write ahead log to disk can be mitigated by setting synchronous\_commit = off. In other situations, the use of a battery-backed RAID controller is recommended. +Note: In cases where the loss of a very recently committed transaction is acceptable, the performance impact of flushing the write ahead log to disk can be mitigated by setting synchronous_commit = off. In other situations, the use of a battery-backed RAID controller is recommended. 
-| | | -|----------------|----------------------------------------------------------------------------| -| Rule | Check wal\_sync\_method | -| Recommendation | On Mac OS X, consider configuring wal\_sync\_method = fsync\_writethrough. | -| Trigger | OS == MacOS X and wal\_sync\_method != fsync\_writethrough | -| Severity | High | +| | | +| -------------- | ----------------------------------------------------------------------- | +| Rule | Check wal_sync_method | +| Recommendation | On Mac OS X, consider configuring wal_sync_method = fsync_writethrough. | +| Trigger | OS == MacOS X and wal_sync_method != fsync_writethrough | +| Severity | High | -**Description:** In order to guarantee reliable crash recovery, PostgreSQL must ensure that the operating system flushes the write-ahead log to disk when asked to do so. On MacOS X, this can be achieved by setting wal\_sync\_method to fsync\_writethrough or by disabling the disk cache on the drive where the write-ahead log is written. It is safe to leave the disk cache enable if a battery-back disk cache is in use. +**Description:** In order to guarantee reliable crash recovery, PostgreSQL must ensure that the operating system flushes the write-ahead log to disk when asked to do so. On MacOS X, this can be achieved by setting wal_sync_method to fsync_writethrough or by disabling the disk cache on the drive where the write-ahead log is written. It is safe to leave the disk cache enable if a battery-back disk cache is in use. -Note: In cases where the loss of a very recently committed transaction is acceptable, the performance impact of flushing the write ahead log to disk can be mitigated by setting synchronous\_commit = off. In other situations, the use of a battery-backed RAID controller is recommended. +Note: In cases where the loss of a very recently committed transaction is acceptable, the performance impact of flushing the write ahead log to disk can be mitigated by setting synchronous_commit = off. 
In other situations, the use of a battery-backed RAID controller is recommended. -| | | -|----------------|-------------------------------------------------| -| Rule | Check wal\_buffers | -| Recommendation | Consider adjusting wal\_buffers | -| Trigger | wal\_buffers < 1MB or wal\_buffers > 16MB | -| Severity | Medium | +| | | +| -------------- | ------------------------------------------ | +| Rule | Check wal_buffers | +| Recommendation | Consider adjusting wal_buffers | +| Trigger | wal_buffers < 1MB or wal_buffers > 16MB | +| Severity | Medium | -**Description:** Increasing the configuration parameter wal\_buffers from the default value of 64kB to 1MB or more can reduced the number of times the database must flush the write-ahead log, leading to improved performance under some workloads. There is no benefit to setting this parameter to a value greater than the size of a WAL segment (16MB). +**Description:** Increasing the configuration parameter wal_buffers from the default value of 64kB to 1MB or more can reduced the number of times the database must flush the write-ahead log, leading to improved performance under some workloads. There is no benefit to setting this parameter to a value greater than the size of a WAL segment (16MB). -| | | -|----------------|-------------------------------------| -| Rule | Check commit\_delay | -| Recommendation | Consider setting commit\_delay = 0. | -| Trigger | commit\_delay != 0 | -| Severity | Low | +| | | +| -------------- | ---------------------------------- | +| Rule | Check commit_delay | +| Recommendation | Consider setting commit_delay = 0. | +| Trigger | commit_delay != 0 | +| Severity | Low | -**Description:** Setting the commit\_delay configuration parameter to a non-zero value causes the system to wait for the specified number of microseconds before flushing the write-ahead log to disk at commit time, potentially allowing several concurrent transactions to commit with a single log flush. 
In most cases, this does not produce a performance benefit, and in some cases, it can produce a performance regression. Unless you have confirmed through benchmarking that a non-default value for this parameter produces a performance benefit, the default value of 0 is recommended. +**Description:** Setting the commit_delay configuration parameter to a non-zero value causes the system to wait for the specified number of microseconds before flushing the write-ahead log to disk at commit time, potentially allowing several concurrent transactions to commit with a single log flush. In most cases, this does not produce a performance benefit, and in some cases, it can produce a performance regression. Unless you have confirmed through benchmarking that a non-default value for this parameter produces a performance benefit, the default value of 0 is recommended. -| | | -|----------------|---------------------------------------------------------------| -| Rule | Check checkpoint\_segments | -| Recommendation | Consider adjusting checkpoint\_segments. | -| Trigger | checkpoint\_segments < 10 or checkpoint\_segments > 300 | -| Severity | Medium | +| | | +| -------------- | -------------------------------------------------------- | +| Rule | Check checkpoint_segments | +| Recommendation | Consider adjusting checkpoint_segments. | +| Trigger | checkpoint_segments < 10 or checkpoint_segments > 300 | +| Severity | Medium | -**Description:** In order to ensure reliable and efficient crash recovery, PostgreSQL periodically writes all dirty buffers to disk. This process is called a checkpoint.Checkpoints occur when (1) the number of write-ahead log segments written since the last checkpoint exceeds checkpoint\_segments, (2) the amount of time since the last checkpoint exceeds checkpoint\_timeout, (3) the SQL command CHECKPOINT is issued, or (4) the system completes either shutdown or crash recovery. 
Increasing the value of checkpoint\_segments will reduce the frequency of checkpoints and will therefore improve performance, especially during bulk loading. The main downside of increasing checkpoint\_segments is that, in the event of a crash, recovery will require a longer period of time to return the database to a consistent state. In addition, increasing checkpoint\_segments will increase disk space consumption during periods of heavy system activity. However, because the theoretical limit on the amount of additional disk space that will be consumed for this reason is less than 32MB per additional checkpoint segment, this is often a small price to pay for improved performance. +**Description:** In order to ensure reliable and efficient crash recovery, PostgreSQL periodically writes all dirty buffers to disk. This process is called a checkpoint. Checkpoints occur when (1) the number of write-ahead log segments written since the last checkpoint exceeds checkpoint_segments, (2) the amount of time since the last checkpoint exceeds checkpoint_timeout, (3) the SQL command CHECKPOINT is issued, or (4) the system completes either shutdown or crash recovery. Increasing the value of checkpoint_segments will reduce the frequency of checkpoints and will therefore improve performance, especially during bulk loading. The main downside of increasing checkpoint_segments is that, in the event of a crash, recovery will require a longer period of time to return the database to a consistent state. In addition, increasing checkpoint_segments will increase disk space consumption during periods of heavy system activity. However, because the theoretical limit on the amount of additional disk space that will be consumed for this reason is less than 32MB per additional checkpoint segment, this is often a small price to pay for improved performance. Values between 30 and 100 are often suitable for modern systems. 
However, on smaller systems, a value as low as 10 may be appropriate, and on larger systems, a value as high as 300 may be useful. Values outside this range are generally not worthwhile. -| | | -|----------------|----------------------------------------------------| -| Rule | Check checkpoint\_completion\_target | -| Recommendation | Consider adjusting checkpoint\_completion\_target. | -| Trigger | checkpoint\_completion\_target != 0.9 | -| Severity | Medium | +| | | +| -------------- | ------------------------------------------------ | +| Rule | Check checkpoint_completion_target | +| Recommendation | Consider adjusting checkpoint_completion_target. | +| Trigger | checkpoint_completion_target != 0.9 | +| Severity | Medium | -**Description:** In order to ensure reliable and efficient crash recovery, PostgreSQL periodically writes all dirty buffers to disk. This process is called a checkpoint. Beginning in PostgreSQL 8.3, checkpoints take place over an extended period of time in order to avoid swamping the I/O system. checkpoint\_completion\_target controls the rate at which the checkpoint is performed, as a function of the time remaining before the next checkpoint is due to start. A value of 0 indicates that the checkpoint should be performed as quickly as possible, whereas a value of 1 indicates that the checkpoint should complete just as the next checkpoint is scheduled to start. It is usually beneficial to spread the checkpoint out as much as possible; however, if checkpoint\_completion\_target is set to a value greater than 0.9, unexpected delays near the end of the checkpoint process can cause the checkpoint to fail to complete before the next one needs to start. Because of this, the recommended setting is 0.9. +**Description:** In order to ensure reliable and efficient crash recovery, PostgreSQL periodically writes all dirty buffers to disk. This process is called a checkpoint. 
Beginning in PostgreSQL 8.3, checkpoints take place over an extended period of time in order to avoid swamping the I/O system. checkpoint_completion_target controls the rate at which the checkpoint is performed, as a function of the time remaining before the next checkpoint is due to start. A value of 0 indicates that the checkpoint should be performed as quickly as possible, whereas a value of 1 indicates that the checkpoint should complete just as the next checkpoint is scheduled to start. It is usually beneficial to spread the checkpoint out as much as possible; however, if checkpoint_completion_target is set to a value greater than 0.9, unexpected delays near the end of the checkpoint process can cause the checkpoint to fail to complete before the next one needs to start. Because of this, the recommended setting is 0.9. -| | | -|-------------------|-----------------------------------------------------------------------------------------------------------------------------------| -| Rule | Check effective\_cache\_size | -| Recommendation | Consider adjusting effective\_cache\_size. | -| Trigger | effective\_cache\_size < 0.5 \* system\_memory or effective\_cache\_size > MAX(0.9 \* system\_memory, system\_memory - 1GB) | -| Recommended value | 0.75 \* system\_memory | -| Severity | Medium | +| | | +| ----------------- | ----------------------------------------------------------------------------------------------------------------------- | +| Rule | Check effective_cache_size | +| Recommendation | Consider adjusting effective_cache_size. 
| +| Trigger | effective_cache_size < 0.5 \* system_memory or effective_cache_size > MAX(0.9 \* system_memory, system_memory - 1GB) | +| Recommended value | 0.75 \* system_memory | +| Severity | Medium | **Description:** When estimating the cost of a nested loop with an inner index-scan, PostgreSQL uses this parameter to estimate the chances that rows from the inner relation which are fetched multiple times will still be in cache when the second fetch occurs. Changing this parameter does not allocate any memory, but an excessively small value may discourage the planner from using indexes that would in fact speed up the query. The recommended value is 75% of system memory. -| | | -|-------------------|-----------------------------------------------------------------------------| -| Rule | Check default\_statistics\_target | -| Recommendation | Consider adjusting default\_statistics\_target. | -| Trigger | default\_statistics\_target < 25 or default\_statistics\_target > 400 | -| Recommended value | 100 | -| Severity | Medium | +| | | +| ----------------- | -------------------------------------------------------------------- | +| Rule | Check default_statistics_target | +| Recommendation | Consider adjusting default_statistics_target. | +| Trigger | default_statistics_target < 25 or default_statistics_target > 400 | +| Recommended value | 100 | +| Severity | Medium | **Description:** PostgreSQL uses statistics to generate good query plans. These statistics are gathered either by a manual ANALYZE command or by an automatic analyze launched by the autovacuum daemon, and they include the most common values in each column of each database table, the approximate distribution of the remaining values, the fraction of rows which are NULL, and several other pieces of statistical information. -default\_statistics\_target indicates the level of detail that should be used in gathering and recording these statistics. 
A value of 100, which is the default beginning in PostgreSQL 8.4, is reasonable for most workloads. For very simple queries, a smaller value may be useful, while for complex queries especially against large tables, a higher value may work better. In some case, it can be helpful to override the default statistics target for specific table columns using ALTER TABLE .. ALTER COLUMN .. SET STATISTICS. +default_statistics_target indicates the level of detail that should be used in gathering and recording these statistics. A value of 100, which is the default beginning in PostgreSQL 8.4, is reasonable for most workloads. For very simple queries, a smaller value may be useful, while for complex queries especially against large tables, a higher value may work better. In some cases, it can be helpful to override the default statistics target for specific table columns using ALTER TABLE .. ALTER COLUMN .. SET STATISTICS. | | | -|----------------|----------------------------------| +| -------------- | -------------------------------- | | Rule | Check planner methods is enabled | | Recommendation | Avoid disabling planner methods. | -| Trigger | any [enable]()\* GUC is off | +| Trigger | any [enable](<>)\* GUC is off | | Severity | High | -**Description:** The enable\_bitmapscan, enable\_hashagg, enable\_hashjoin, enable\_indexscan, enable\_material, enable\_mergejoin, enable\_nestloop, enable\_seqscan, enable\_sort, and enable\_tidscan parameters are intended primarily for debugging and should not be turned off. It can sometimes be helpful to disable one or more of these parameters for a particular query, when there is no other way to obtain the desired plan. However, none of these parameters should ever be turned off on a system-wide basis. 
+**Description:** The enable_bitmapscan, enable_hashagg, enable_hashjoin, enable_indexscan, enable_material, enable_mergejoin, enable_nestloop, enable_seqscan, enable_sort, and enable_tidscan parameters are intended primarily for debugging and should not be turned off. It can sometimes be helpful to disable one or more of these parameters for a particular query, when there is no other way to obtain the desired plan. However, none of these parameters should ever be turned off on a system-wide basis. -| | | -|----------------|------------------------------------------| -| Rule | Check track\_counts is enabled | -| Recommendation | Consider configuring track\_counts = on. | -| Trigger | track\_counts = off | -| Severity | High | +| | | +| -------------- | --------------------------------------- | +| Rule | Check track_counts is enabled | +| Recommendation | Consider configuring track_counts = on. | +| Trigger | track_counts = off | +| Severity | High | -**Description:** Autovacuum will not function properly if track\_counts is disabled. Regular vacuuming is crucial to system stability and performance. +**Description:** Autovacuum will not function properly if track_counts is disabled. Regular vacuuming is crucial to system stability and performance. | | | -|----------------|---------------------------------------| +| -------------- | ------------------------------------- | | Rule | Check autovacuum is enabled | | Recommendation | Consider configuring autovacuum = on. | | Trigger | autovacuum = off | @@ -177,29 +176,29 @@ default\_statistics\_target indicates the level of detail that should be used in **Description:** Enabling autovacuum is an important part of maintaining system stability and performance. Although disabling autovacuum may be useful during bulk loading, it should always be promptly reenabled when bulk loading is completed. 
Leaving autovacuum disabled for extended periods of time will result in table and index "bloat",where available free space is not reused, resulting in uncontrolled table and index growth. Reversing such bloat requires invasive maintenance using CLUSTER, REINDEX, and/or VACUUM FULL. Allowing autovacuum to work normally is usually sufficient to avoid the need for such maintenance. -| | | -|----------------|----------------------------------------------------------------| -| Rule | Check configuring seq\_page\_cost | -| Recommendation | Consider configuring seq\_page\_cost <= random\_page\_cost. | -| Trigger | seq\_page\_cost > random\_page\_cost | -| Severity | Medium | +| | | +| -------------- | ---------------------------------------------------------- | +| Rule | Check configuring seq_page_cost | +| Recommendation | Consider configuring seq_page_cost <= random_page_cost. | +| Trigger | seq_page_cost > random_page_cost | +| Severity | Medium | -**Description:** seq\_page\_cost and random\_page\_cost are parameters used by the query parameter to determine the optimal plan for each query. seq\_page\_cost represents the cost of a sequential page read, while random\_page\_cost represents the cost of a random page read. While these costs might be equal, if, for example, the database is fully cached in RAM, the sequential cost can never be higher. The PostgreSQL query planner will produce poor plans if seq\_page\_cost is set higher than random\_page\_cost. +**Description:** seq_page_cost and random_page_cost are parameters used by the query parameter to determine the optimal plan for each query. seq_page_cost represents the cost of a sequential page read, while random_page_cost represents the cost of a random page read. While these costs might be equal, if, for example, the database is fully cached in RAM, the sequential cost can never be higher. The PostgreSQL query planner will produce poor plans if seq_page_cost is set higher than random_page_cost. 
-| | | -|----------------|-----------------------------------------------------------------------------| -| Rule | Check reducing random\_page\_cost | -| Recommendation | Consider reducing random\_page\_cost to no more than twice seq\_page\_cost. | -| Trigger | random\_page\_cost > 2 \* seq\_page\_cost | -| Severity | Low | +| | | +| -------------- | ----------------------------------------------------------------------- | +| Rule | Check reducing random_page_cost | +| Recommendation | Consider reducing random_page_cost to no more than twice seq_page_cost. | +| Trigger | random_page_cost > 2 \* seq_page_cost | +| Severity | Low | -**Description:** seq\_page\_cost and random\_page\_cost are parameters used by the query parameter to determine the optimal plan for each query. seq\_page\_cost represents the cost of a sequential page read, while random\_page\_cost represents the cost of a random page read. random\_page\_cost should always be greater than or equal to seq\_page\_cost, but it is rarely beneficial to set random\_page\_cost to a value more than twice seq\_page\_cost. However, the correct values for these variables are workload-dependent. If the database's working set is much larger than physical memory and the blocks needed to execute a query will rarely be in cache, setting random\_page\_cost to a value greater than twice seq\_page\_cost may maximize performance. +**Description:** seq_page_cost and random_page_cost are parameters used by the query parameter to determine the optimal plan for each query. seq_page_cost represents the cost of a sequential page read, while random_page_cost represents the cost of a random page read. random_page_cost should always be greater than or equal to seq_page_cost, but it is rarely beneficial to set random_page_cost to a value more than twice seq_page_cost. However, the correct values for these variables are workload-dependent. 
If the database's working set is much larger than physical memory and the blocks needed to execute a query will rarely be in cache, setting random_page_cost to a value greater than twice seq_page_cost may maximize performance. -| | | -|----------------|----------------------------------------------------------------------------------------------------------------------------------| -| Rule | Check increasing seq\_page\_cost | -| Recommendation | Consider increasing seq\_page\_cost. | -| Trigger | seq\_page\_cost < cpu\_tuple\_cost, seq\_page\_cost < cpu\_index\_tuple\_cost, or seq\_page\_cost < cpu\_operator\_cost | -| Severity | Medium | +| | | +| -------------- | ------------------------------------------------------------------------------------------------------------------- | +| Rule | Check increasing seq_page_cost | +| Recommendation | Consider increasing seq_page_cost. | +| Trigger | seq_page_cost < cpu_tuple_cost, seq_page_cost < cpu_index_tuple_cost, or seq_page_cost < cpu_operator_cost | +| Severity | Medium | -**Description:** The cost of reading a page into the buffer cache, even if it is already resident in the operating system buffer cache, is rarely less than the cost of a CPU operation. Thus, the value of the configuration parameter seq\_page\_cost should usually be greater than the values of the configuration parameters cpu\_tuple\_cost ,cpu\_index\_tuple\_cost, and cpu\_operator\_cost. +**Description:** The cost of reading a page into the buffer cache, even if it is already resident in the operating system buffer cache, is rarely less than the cost of a CPU operation. Thus, the value of the configuration parameter seq_page_cost should usually be greater than the values of the configuration parameters cpu_tuple_cost, cpu_index_tuple_cost, and cpu_operator_cost. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/index.mdx index 8efb5ea4f95..b7a1eb57a79 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/07_pem_postgres_expert/index.mdx @@ -4,12 +4,11 @@ title: "Postgres Expert" - Postgres Expert analyzes the configuration of servers that are registered with the Enterprise Manager, and provides advice about: -- [Server Performance](../04_toc_pem_features/07_pem_postgres_expert/#pe_schema_config_expert_recommendations) -- [Server Security](../04_toc_pem_features/07_pem_postgres_expert/#pe_security_expert_recommendations) -- [Server Configuration](../04_toc_pem_features/07_pem_postgres_expert/#pe_configuration_expert_recommendations) +- [Server Performance](01_pe_schema_config_expert_recommendations/#pe_schema_config_expert_recommendations) +- [Server Security](02_pe_security_expert_recommendations/#pe_security_expert_recommendations) +- [Server Configuration](03_pe_configuration_expert_recommendations/#pe_configuration_expert_recommendations) Postgres Expert is an advisory utility; after analyzing the selected servers, Postgres Expert produces a report containing analysis of potential performance and security issues, along with suggestions for addressing each such issue. @@ -55,7 +54,13 @@ If your report contains recommendations for more than one server, you can use th For each server analyzed, the Postgres Expert returns recommendations from the `Configuration Expert`, the `Schema Expert`, and the `Security Expert`. Each expert returns a list of rules that raised an alert, the database that the rule pertains to, and the severity level of the alert. Click on a rule name to view detailed information about the selected rule: -
Section Heading

Contains

Trigger

A description of the rule that raised the alert.

Recommended Value

The value to which Postgres Expert recommends setting the selected parameter.

Description

Information and advice about the parameter that caused the alert.

Current Values

The current value(s) of the parameter(s).

+| | | +| ----------------- | ----------------------------------------------------------------------------------------------------------- | +| Section Heading |


Contains

| +| Trigger |


A description of the rule that raised the alert.

| +| Recommended Value |


The value to which Postgres Expert recommends setting the selected parameter.

| +| Description |


Information and advice about the parameter that caused the alert.

| +| Current Values |


The current value(s) of the parameter(s).

| ![Postgres Expert Report - Parameter Value recommendation](../../images/pe_param_value.png) @@ -63,6 +68,6 @@ For more information about each rule checked by the Postgres Expert, see:
-pe\_schema\_config\_expert\_recommendations pe\_security\_expert\_recommendations pe\_configuration\_expert\_recommendations +pe_schema_config_expert_recommendations pe_security_expert_recommendations pe_configuration_expert_recommendations
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/01_capacity_manager_metrics.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/01_capacity_manager_metrics.mdx index 5352595cd57..5d35088a88e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/01_capacity_manager_metrics.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/01_capacity_manager_metrics.mdx @@ -4,7 +4,6 @@ title: "Tab 1 (Metrics)" - To create a Capacity Manager report, expand the tree control on the `Metrics` tab to locate the metrics that are available for the node that you wish to analyze. ![Capacity manager metrics selection list](../../images/capacity_manager_metrics.png) @@ -24,4 +23,4 @@ To remove a metric from the Capacity Manager report, uncheck the box to the left Move the slider next to `Graph/chart metrics individually?` to `Yes` to instruct Capacity Manager to produce a separate report for each metric selected on the `Metrics` tab. If the option is set to `No`, all selected metrics will be merged into a single graph or table. -Click the `Generate` button to display the report onscreen (accepting the default configuration options), or continue to the [Options](../../04_toc_pem_features/08_capacity_manager/#capacity_manager_options) tab to specify sampling boundaries, report type and report destination. +Click the `Generate` button to display the report onscreen (accepting the default configuration options), or continue to the [Options](02_capacity_manager_options/#capacity_manager_options) tab to specify sampling boundaries, report type and report destination. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/02_capacity_manager_options.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/02_capacity_manager_options.mdx index 4f0fb9c5a1f..5c0864b65e7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/02_capacity_manager_options.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/02_capacity_manager_options.mdx @@ -4,7 +4,6 @@ title: "Tab 2 (Options)" - Use the fields on the `Options` tab to specify the starting and ending boundaries of the Capacity Manager report, the type of report generated, and the location to which the report will be displayed or written. ![Capacity manager options](../../images/capacity_manager_options.png) @@ -14,7 +13,7 @@ Use the fields within the `Time Period` box to define the boundaries of the Capa - Use the `Period` drop-down listbox to select the type of time period you wish to use for the report. You can select: | Value | Description | -|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Start time and end time | Specify a start and end date and time for the report. | | Start time and threshold | Specify a start date and time, and a threshold to determine the end time and date for the report. | | Historical days and extrapolated days | Specify a start date for the report that is a number of days in the past, and an end date that is a number of days in the future. 
This option is useful for report templates that do not specify fixed dates. | @@ -31,7 +30,7 @@ After specifying the type of time period for the report, select from other optio > > **Please Note:** If you specify a starting boundary that is later than the ending boundary for the report, the status bar will display an error informing you that you must enter a valid time. -The cm\_max\_end\_date\_in\_years <pem\_config\_options> configuration parameter defines a default time value for the end boundary of Capacity Manager reports. If you specify a threshold value as the end boundary of a report, and the anticipated usage of the boundary is not met before the maximum time has passed (as specified in the `cm_max_date_in_years` parameter), the report will terminate at the time specified by the `cm_max_date_in_years` parameter. By default, `cm_max_end_date_in_years` is 5; use the `` `Server Configuration `` dialog <pem\_server\_config> to modify the value of cm\_max\_end\_date\_in\_years\`. +The cm_max_end_date_in_years <pem_config_options> configuration parameter defines a default time value for the end boundary of Capacity Manager reports. If you specify a threshold value as the end boundary of a report, and the anticipated usage of the boundary is not met before the maximum time has passed (as specified in the `cm_max_date_in_years` parameter), the report will terminate at the time specified by the `cm_max_date_in_years` parameter. By default, `cm_max_end_date_in_years` is 5; use the `` `Server Configuration `` dialog <pem_server_config> to modify the value of cm_max_end_date_in_years\`. **Please Note:** The PEM client will display time in the PEM client's timezone, rather than the timezone in which the PEM server resides. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/index.mdx index 7a68be69371..1e0f7ab6546 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/08_capacity_manager/index.mdx @@ -4,7 +4,6 @@ title: "Capacity Manager" - PEM's Capacity Manager analyzes collected statistics (metrics) to generate a graph or table that displays the historical usage statistics of an object, and can project the anticipated usage statistics for an object. You can configure Capacity Manager to collect and analyze metrics for a specific: - Host/operating system @@ -18,13 +17,13 @@ To open Capacity Manager, select the `Capacity Manager...` option from the `Mana ![Capacity Manager dialog](../../images/capacity_manager_opens.png) -Expand the tree control on the [Metrics](../04_toc_pem_features/08_capacity_manager/#capacity_manager_metrics) tab to select the metrics that will be included in the Capacity Manager report. +Expand the tree control on the [Metrics](01_capacity_manager_metrics/#capacity_manager_metrics) tab to select the metrics that will be included in the Capacity Manager report. When defining report options, you can specify an `aggregation` method for each selected metric. The aggregation method determines how Capacity Manager will analyze the data points within the sampling period to reduce the data to a more visually meaningful quantity within a report (if required). The aggregation method can instruct Capacity Manager to compute an average of the data within a time period, the high or low value, or the first sampled value. -Use the [Options](../04_toc_pem_features/08_capacity_manager/#capacity_manager_options) tab to specify additional report details. 
+Use the [Options](02_capacity_manager_options/#capacity_manager_options) tab to specify additional report details. -When defining the boundaries of a Capacity Manager report, specify the starting date and time, and an end boundary. The end boundary can be a point in time or a threshold boundary (when the data meets a specified criteria). If the sample contains more data points than the number of points specified by the cm\_data\_points\_per\_report <pem\_config\_options> configuration parameter, Capacity Manager applies the aggregation method to calculate a reduced number of graph points for the report. +When defining the boundaries of a Capacity Manager report, specify the starting date and time, and an end boundary. The end boundary can be a point in time or a threshold boundary (when the data meets a specified criteria). If the sample contains more data points than the number of points specified by the cm_data_points_per_report <pem_config_options> configuration parameter, Capacity Manager applies the aggregation method to calculate a reduced number of graph points for the report. ## Report Templates @@ -41,130 +40,131 @@ When creating a report, you can use the `Load Template` button to browse and ope Please Note that the available metrics will vary by platform, and are subject to change. The available metrics may include the metrics described in the table below: -| Metric Name | Description | -|----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| \# Dead Tuples | The number of dead tuples in the selected table. | -| \# Dead Tuples+ | The cumulative number of dead tuples in the selected table. | -| \# Heap Tuples Fetched by Index Scans | The number of heap tuples fetched by index scans. | -| \# Heap Tuples Fetched by Index Scans+ | The cumulative number of heap tuples fetched by index scans. 
| -| \# Idle Backends+ | The cumulative number of currently idle backend clients. | -| \# Index Scans | The number of index scans performed on the specified object. | -| \# Index Scans+ | The cumulative number of index scans performed on the specified object. | -| \# Index Tuples Read | The number of index tuples read. | -| \# Index Tuples Read+ | The cumulative number of index tuples read. | -| \# Live Tuples | The number of tuples visible to transactions. | -| \# Live Tuples+ | The cumulative number of tuples visible to transactions. | -| \# Pages Estimated by ANALYZE | The number of pages estimated by ANALYZE. | -| \# Pages Estimated by ANALYZE+ | The cumulative number of pages estimated by ANALYZE. | -| \# Sequential Scans | The number of sequential scans performed on the specific table. | -| \# Sequential Scans+ | The cumulative number of sequential scans performed on the specific table. | -| \# Sequential Scan Tuples | The number of tuples sequentially scanned in the specific table. | -| \# Sequential Scan Tuples+ | The cumulative number of tuples sequentially scanned in the specific table. | -| \# Tuples Deleted | The number of tuples deleted. | -| \# Tuples Deleted+ | The cumulative number of tuples deleted. | -| \# Tuples Estimated by ANALYZE | The number of live (visible) tuples estimated by ANALYZE. | -| \# Tuples Estimated by ANALYZE+ | The cumulative number of live tuples estimated by ANALYZE. | -| \# Tuples HOT Updated | The number of tuples HOT updated. In a HOT update, the new tuple resides in the same block as the original tuple and the tuples share an index entry. | -| \# Tuples HOT Updated+ | The cumulative number of tuples HOT updated. | -| \# Tuples Inserted | The number of tuples inserted into the specified table. | -| \# Tuples Inserted+ | The cumulative number of tuples inserted into the specified table. | -| \# Tuples Updated | The number of tuples updated in the selected table. 
| -| \# Tuples Updated+ | The cumulative number of tuples updated in the selected table. | -| Blocks Hit | The number of blocks found in the cache. | -| Blocks Hit+ | The cumulative number of blocks found in the cache. | -| Blocks Read | The number of blocks read. | -| Blocks Read+ | The cumulative number of blocks read. | -| Blocks Read from InfiniteCache | The number of blocks read from InfiniteCache. | -| Blocks Read from InfiniteCache+ | The cumulative number of blocks read from InfiniteCache. | -| Blocks Written | The number of blocks written. | -| Blocks Written+ | The cumulative number of blocks written. | -| Buffers Allocated | The number of buffers allocated. | -| Buffers Allocated+ | The cumulative number of buffers allocated. | -| Buffers Written - Backends | The number of buffer blocks written to disk by server processes (processes connected to a client application). | -| Buffers Written - Backends+ | The cumulative number of buffer blocks written to disk by server processes. | -| Buffers Written - Checkpoint | The number of blocks written to disk by the checkpoint process. | -| Buffers Written - Checkpoint+ | The cumulative number of blocks written to disk by the checkpoint process. | -| Buffers Written - Cleaning Scan | The number of blocks written to disk by the autovacuum process. | -| Buffers Written - Cleaning Scan+ | The cumulative number of blocks written to disk by the autovacuum process. | -| Bytes Received (KB) | The number of bytes received from the client (in kilobytes). | -| Bytes Received (KB)+ | The cumulative number of bytes received (in kilobytes). | -| Bytes Sent (KB) | The number of bytes sent to the client (in kilobytes). | -| Bytes Sent (KB)+ | The cumulative number of bytes sent (in kilobytes). | -| Checkpoints - Timed | The number of checkpoint operations triggered by the checkpoint interval. | -| Checkpoints - Timed+ | The cumulative number of checkpoint operations triggered by the checkpoint interval. 
| -| Checkpoints - Untimed | The number of checkpoint operations triggered by checkpoint size. | -| Checkpoints - Untimed+ | The cumulative number of checkpoint operations triggered by checkpoint size. | -| Database Size (MB) | The size of the specified database (in megabytes). | -| Free RAM Memory | The amount of free RAM memory (in megabytes). | -| Free Swap Memory | The amount of free swap space on disk (in megabytes). | -| Heap Blocks Hit | The number of heap blocks found in the cache. | -| Heap Blocks Hit+ | The cumulative number of heap blocks found in the cache. | -| Heap Blocks Read | The number of heap blocks read. | -| Heap Blocks Read+ | The cumulative number of heap blocks read. | -| Index Blocks Hit | The number of index blocks found in the cache. | -| Index Blocks Hit+ | The cumulative number of index blocks found in the cache. | -| Index Blocks Read | The number of index blocks read. | -| Index Blocks Read+ | The cumulative number of index blocks read. | -| Index Size (MB) | The size of the specified index (in megabytes). | -| In Packets Discards | The number of inbound packets discarded. | -| In Packets Discards+ | The cumulative number of inbound packets discarded. | -| In Packets Errors | The number of inbound packets that contain errors. | -| In Packets Errors+ | The cumulative number of inbound packets that contain errors. | -| Link Bandwidth (Mbit/s) | The speed of the network adapter (in megabits per second). | -| Load Average - 15 Minute | CPU saturation (in percent) - 15 minute sampling average. | -| Load Average - 1 Minute | CPU saturation (in percent) - 1 minute sampling average. | -| Load Average - 5 Minute | CPU saturation (in percent) - 5 minute sampling average. | -| Load Percentage | CPU saturation in percent. | -| Number of Prepared Transactions+ | The cumulative number of prepared transactions. | -| Number of WAL Files+ | The cumulative number of write-ahead log files. 
| -| Out Packets Discards | The number of outbound packets discarded. | -| Out Packets Discards+ | The cumulative number of outbound packets discarded. | -| Out Packets Errors | The number of outbound packets that contain errors. | -| Out Packets Errors+ | The cumulative number of outbound packets that contain errors. | -| Packets Received | The number of packets received. | -| Packets Received+ | The cumulative number of packets received. | -| Packets Sent | The number of packets sent. | -| Packets Sent+ | The cumulative number of packets sent. | -| Size (MB) | The total size of the disk (in megabytes). | -| Size of Indexes (MB) | The size of indexes on the specified table (in megabytes). | -| Space Available (MB) | The current disk space available (in megabytes). | -| Space Used (MB) | The current disk space used (in megabytes). | -| Table Size (MB) | The size of the specified table (in megabytes). | -| Tablespace Size (MB) | The size of the specified tablespace (in megabytes). | -| Temp Buffers (MB) | The size of temporary buffers (in megabytes). | -| Toast Blocks Hit | The number of TOAST blocks found in the cache. | -| Toast Blocks Hit+ | The cumulative number of TOAST blocks found in the cache. | -| Toast Blocks Read | The number of TOAST blocks read. | -| Toast Blocks Read+ | The cumulative number of TOAST blocks read. | -| Total RAM Memory | The total amount of RAM memory on the system (in megabytes). | -| Total Swap Memory | The total amount of swap space on the system (in megabytes). | -| Total Table Size w/Indexes and Toast | The total size of the specified table (including indexes and associated oversized attributes). | -| Transactions Aborted | The number of aborted transactions. | -| Transactions Aborted+ | The cumulative number of aborted transactions. | -| Transactions Committed | The number of committed transactions. | -| Transactions Committed+ | The cumulative number of committed transactions. 
| -| Tuples Deleted | The number of tuples deleted from the specified table. | -| Tuples Deleted+ | The cumulative number of tuples deleted from the specified table. | -| Tuples Estimated by ANALYZE | The number of visible tuples in the specified table. | -| Tuples Estimated by ANALYZE+ | The cumulative number of visible tuples in the specified table. | -| Tuples Fetched | The number of tuples fetched from the specified table. | -| Tuples Fetched+ | The cumulative number of tuples fetched from the specified table. | -| Tuples HOT Updated | The number of tuples HOT updated. In a HOT update, the new tuple resides in the same block as the original tuple and the tuples share an index entry. | -| Tuples HOT Updated+ | The cumulative number of tuples HOT updated. In a HOT update, the new tuple resides in the same block as the original tuple and the tuples share an index entry. | -| Tuples Inserted | The number of tuples inserted into the specified table. | -| Tuples Inserted+ | The cumulative number of tuples inserted into the specified table. | -| Tuples Returned | The number of tuples returned in result sets. | -| Tuples Returned+ | The cumulative number of tuples returned in result sets. | -| Tuples Updated | The number of tuples updated in the specified table. | -| Tuples Updated+ | The cumulative number of tuples updated in the specified table. | -| WAL Segment Size (MB) | The segment size of the write-ahead log (in megabytes). | - -**Note:** The '+' following the name of a metric signifies that the data for the metric is gathered cumulatively; those metrics that are not followed by the '+' sign are collected as a 'point-in-time' value. +| Metric Name | Description | +| ------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| # Dead Tuples | The number of dead tuples in the selected table. 
| +| # Dead Tuples+ | The cumulative number of dead tuples in the selected table. | +| # Heap Tuples Fetched by Index Scans | The number of heap tuples fetched by index scans. | +| # Heap Tuples Fetched by Index Scans+ | The cumulative number of heap tuples fetched by index scans. | +| # Idle Backends+ | The cumulative number of currently idle backend clients. | +| # Index Scans | The number of index scans performed on the specified object. | +| # Index Scans+ | The cumulative number of index scans performed on the specified object. | +| # Index Tuples Read | The number of index tuples read. | +| # Index Tuples Read+ | The cumulative number of index tuples read. | +| # Live Tuples | The number of tuples visible to transactions. | +| # Live Tuples+ | The cumulative number of tuples visible to transactions. | +| # Pages Estimated by ANALYZE | The number of pages estimated by ANALYZE. | +| # Pages Estimated by ANALYZE+ | The cumulative number of pages estimated by ANALYZE. | +| # Sequential Scans | The number of sequential scans performed on the specific table. | +| # Sequential Scans+ | The cumulative number of sequential scans performed on the specific table. | +| # Sequential Scan Tuples | The number of tuples sequentially scanned in the specific table. | +| # Sequential Scan Tuples+ | The cumulative number of tuples sequentially scanned in the specific table. | +| # Tuples Deleted | The number of tuples deleted. | +| # Tuples Deleted+ | The cumulative number of tuples deleted. | +| # Tuples Estimated by ANALYZE | The number of live (visible) tuples estimated by ANALYZE. | +| # Tuples Estimated by ANALYZE+ | The cumulative number of live tuples estimated by ANALYZE. | +| # Tuples HOT Updated | The number of tuples HOT updated. In a HOT update, the new tuple resides in the same block as the original tuple and the tuples share an index entry. | +| # Tuples HOT Updated+ | The cumulative number of tuples HOT updated. 
| +| # Tuples Inserted | The number of tuples inserted into the specified table. | +| # Tuples Inserted+ | The cumulative number of tuples inserted into the specified table. | +| # Tuples Updated | The number of tuples updated in the selected table. | +| # Tuples Updated+ | The cumulative number of tuples updated in the selected table. | +| Blocks Hit | The number of blocks found in the cache. | +| Blocks Hit+ | The cumulative number of blocks found in the cache. | +| Blocks Read | The number of blocks read. | +| Blocks Read+ | The cumulative number of blocks read. | +| Blocks Read from InfiniteCache | The number of blocks read from InfiniteCache. | +| Blocks Read from InfiniteCache+ | The cumulative number of blocks read from InfiniteCache. | +| Blocks Written | The number of blocks written. | +| Blocks Written+ | The cumulative number of blocks written. | +| Buffers Allocated | The number of buffers allocated. | +| Buffers Allocated+ | The cumulative number of buffers allocated. | +| Buffers Written - Backends | The number of buffer blocks written to disk by server processes (processes connected to a client application). | +| Buffers Written - Backends+ | The cumulative number of buffer blocks written to disk by server processes. | +| Buffers Written - Checkpoint | The number of blocks written to disk by the checkpoint process. | +| Buffers Written - Checkpoint+ | The cumulative number of blocks written to disk by the checkpoint process. | +| Buffers Written - Cleaning Scan | The number of blocks written to disk by the autovacuum process. | +| Buffers Written - Cleaning Scan+ | The cumulative number of blocks written to disk by the autovacuum process. | +| Bytes Received (KB) | The number of bytes received from the client (in kilobytes). | +| Bytes Received (KB)+ | The cumulative number of bytes received (in kilobytes). | +| Bytes Sent (KB) | The number of bytes sent to the client (in kilobytes). 
| +| Bytes Sent (KB)+ | The cumulative number of bytes sent (in kilobytes). | +| Checkpoints - Timed | The number of checkpoint operations triggered by the checkpoint interval. | +| Checkpoints - Timed+ | The cumulative number of checkpoint operations triggered by the checkpoint interval. | +| Checkpoints - Untimed | The number of checkpoint operations triggered by checkpoint size. | +| Checkpoints - Untimed+ | The cumulative number of checkpoint operations triggered by checkpoint size. | +| Database Size (MB) | The size of the specified database (in megabytes). | +| Free RAM Memory | The amount of free RAM memory (in megabytes). | +| Free Swap Memory | The amount of free swap space on disk (in megabytes). | +| Heap Blocks Hit | The number of heap blocks found in the cache. | +| Heap Blocks Hit+ | The cumulative number of heap blocks found in the cache. | +| Heap Blocks Read | The number of heap blocks read. | +| Heap Blocks Read+ | The cumulative number of heap blocks read. | +| Index Blocks Hit | The number of index blocks found in the cache. | +| Index Blocks Hit+ | The cumulative number of index blocks found in the cache. | +| Index Blocks Read | The number of index blocks read. | +| Index Blocks Read+ | The cumulative number of index blocks read. | +| Index Size (MB) | The size of the specified index (in megabytes). | +| In Packets Discards | The number of inbound packets discarded. | +| In Packets Discards+ | The cumulative number of inbound packets discarded. | +| In Packets Errors | The number of inbound packets that contain errors. | +| In Packets Errors+ | The cumulative number of inbound packets that contain errors. | +| Link Bandwidth (Mbit/s) | The speed of the network adapter (in megabits per second). | +| Load Average - 15 Minute | CPU saturation (in percent) - 15 minute sampling average. | +| Load Average - 1 Minute | CPU saturation (in percent) - 1 minute sampling average. 
| +| Load Average - 5 Minute | CPU saturation (in percent) - 5 minute sampling average. | +| Load Percentage | CPU saturation in percent. | +| Number of Prepared Transactions+ | The cumulative number of prepared transactions. | +| Number of WAL Files+ | The cumulative number of write-ahead log files. | +| Out Packets Discards | The number of outbound packets discarded. | +| Out Packets Discards+ | The cumulative number of outbound packets discarded. | +| Out Packets Errors | The number of outbound packets that contain errors. | +| Out Packets Errors+ | The cumulative number of outbound packets that contain errors. | +| Packets Received | The number of packets received. | +| Packets Received+ | The cumulative number of packets received. | +| Packets Sent | The number of packets sent. | +| Packets Sent+ | The cumulative number of packets sent. | +| Size (MB) | The total size of the disk (in megabytes). | +| Size of Indexes (MB) | The size of indexes on the specified table (in megabytes). | +| Space Available (MB) | The current disk space available (in megabytes). | +| Space Used (MB) | The current disk space used (in megabytes). | +| Table Size (MB) | The size of the specified table (in megabytes). | +| Tablespace Size (MB) | The size of the specified tablespace (in megabytes). | +| Temp Buffers (MB) | The size of temporary buffers (in megabytes). | +| Toast Blocks Hit | The number of TOAST blocks found in the cache. | +| Toast Blocks Hit+ | The cumulative number of TOAST blocks found in the cache. | +| Toast Blocks Read | The number of TOAST blocks read. | +| Toast Blocks Read+ | The cumulative number of TOAST blocks read. | +| Total RAM Memory | The total amount of RAM memory on the system (in megabytes). | +| Total Swap Memory | The total amount of swap space on the system (in megabytes). | +| Total Table Size w/Indexes and Toast | The total size of the specified table (including indexes and associated oversized attributes). 
| +| Transactions Aborted | The number of aborted transactions. | +| Transactions Aborted+ | The cumulative number of aborted transactions. | +| Transactions Committed | The number of committed transactions. | +| Transactions Committed+ | The cumulative number of committed transactions. | +| Tuples Deleted | The number of tuples deleted from the specified table. | +| Tuples Deleted+ | The cumulative number of tuples deleted from the specified table. | +| Tuples Estimated by ANALYZE | The number of visible tuples in the specified table. | +| Tuples Estimated by ANALYZE+ | The cumulative number of visible tuples in the specified table. | +| Tuples Fetched | The number of tuples fetched from the specified table. | +| Tuples Fetched+ | The cumulative number of tuples fetched from the specified table. | +| Tuples HOT Updated | The number of tuples HOT updated. In a HOT update, the new tuple resides in the same block as the original tuple and the tuples share an index entry. | +| Tuples HOT Updated+ | The cumulative number of tuples HOT updated. In a HOT update, the new tuple resides in the same block as the original tuple and the tuples share an index entry. | +| Tuples Inserted | The number of tuples inserted into the specified table. | +| Tuples Inserted+ | The cumulative number of tuples inserted into the specified table. | +| Tuples Returned | The number of tuples returned in result sets. | +| Tuples Returned+ | The cumulative number of tuples returned in result sets. | +| Tuples Updated | The number of tuples updated in the specified table. | +| Tuples Updated+ | The cumulative number of tuples updated in the specified table. | +| WAL Segment Size (MB) | The segment size of the write-ahead log (in megabytes). | + +!!! Note + The '+' following the name of a metric signifies that the data for the metric is gathered cumulatively; those metrics that are not followed by the '+' sign are collected as a 'point-in-time' value. Contents:
-capacity\_manager\_metrics capacity\_manager\_options +capacity_manager_metrics capacity_manager_options
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/01_pem_alerting_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/01_pem_alerting_dialog.mdx index a95bcc94582..0b0490872d5 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/01_pem_alerting_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/01_pem_alerting_dialog.mdx @@ -4,18 +4,17 @@ title: "Creating and Managing Alerts" - Use options accessed through the `Manage Alerts` tab to create, copy, or modify an alert. To open the `Manage Alerts` tab, select `Manage Alerts...` from the PEM client's `Management` menu. ![Manage Alerts](../../images/alerting_manage_alerts.png) Use the `Quick Links` toolbar to open dialogs and tabs that you can use to manage alerts and alerting behavior: -> - Select `Copy Alerts` to open the [Copy Alert Configuration](../../04_toc_pem_features/09_pem_alerting/#pem_alert_copy) dialog and copy an alert definition. -> - Select `Alert Templates` to open the [Alert Template](../../04_toc_pem_features/09_pem_alerting/#pem_custom_alert_templates) tab, and create or modify an alert template. -> - Select `Email Groups` to open the [Email Groups](../../04_toc_pem_features/09_pem_alerting/#pem_email_groups) tab, and manage or create an email group. -> - Select `Webhooks` to open the [Webhooks](../../04_toc_pem_features/09_pem_alerting/#pem_webhooks) tab, and manage or create a webhook endpoint. -> - Select `Server Configuration` to open the [server configuration](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog and review or modify server configuration settings. +> - Select `Copy Alerts` to open the [Copy Alert Configuration](02_pem_alert_copy/#pem_alert_copy) dialog and copy an alert definition. 
+> - Select `Alert Templates` to open the [Alert Template](04_pem_custom_alert_templates/#pem_custom_alert_templates) tab, and create or modify an alert template. +> - Select `Email Groups` to open the [Email Groups](05_pem_email_groups/#pem_email_groups) tab, and manage or create an email group. +> - Select `Webhooks` to open the [Webhooks](06_pem_webhooks/#pem_webhooks) tab, and manage or create a webhook endpoint. +> - Select `Server Configuration` to open the [server configuration](../02_pem_server_config/#pem_server_config) dialog and review or modify server configuration settings. > - Select `Help` to open the PEM online help. The `Alerts` table displays the alerts that are defined for the item currently highlighted in the PEM client tree control. You can use the `Alerts` table to modify an existing alert, or to create a new alert. @@ -36,7 +35,7 @@ Use the fields on the `General` tab to provide information about the alert: - Use controls in the `Threshold values` box to define the triggering criteria for the alert. When the value specified in the `Threshold Values` fields evaluates to greater-than or less-than the system value (as specified with the `Operator`), PEM will raise a `Low`, `Medium` or `High` level alert: > - Use the `Operator` drop-down listbox to select the operator that PEM will use when evaluating the current system values. -> - Select a greater-than sign (>) to indicate that the alert should be triggered when the system values are greater than the values entered in the `Threshold values` fields. +> - Select a greater-than sign (>) to indicate that the alert should be triggered when the system values are greater than the values entered in the `Threshold values` fields. > - Select a less-than sign (<) to indicate that the alert should be triggered when the system values are less than the values entered in the `Threshold values` fields. 
> - Use the threshold fields to specify the values that PEM will compare to the system values to determine if an alert should be raised. Please note that you must specify values for all three thresholds (`Low`, `Medium`, and `High`): > - Enter a value that will trigger a low-severity alert in the `Low` field. @@ -51,15 +50,15 @@ Use the `Notification` tab to specify how PEM will behave if an alert is raised. PEM can send a notification or execute a script if an alert is triggered, or if an alert is cleared. -Use the fields in the `Email` tab to specify the email group that will receive an email notification if the alert is triggered at the specified level. Use the [Email Groups](../../04_toc_pem_features/09_pem_alerting/#pem_email_groups) tab to create an email group that contains the address of the user or users that will be notified when an alert is triggered. To access the `Email Groups` tab, click the `Email Groups` icon located in the `Quick Links` menu of the `Manage Alerts` tab. +Use the fields in the `Email` tab to specify the email group that will receive an email notification if the alert is triggered at the specified level. Use the [Email Groups](05_pem_email_groups/#pem_email_groups) tab to create an email group that contains the address of the user or users that will be notified when an alert is triggered. To access the `Email Groups` tab, click the `Email Groups` icon located in the `Quick Links` menu of the `Manage Alerts` tab. To instruct PEM to send an email when a specific alert level is reached, set the slider next to an alert level to `Yes`, and use the drop-down listbox to select the pre-defined user or group that will be notified. -Please note that you must [configure the PEM Server](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) to use an SMTP server to deliver email before PEM can send email notifications. 
+Please note that you must [configure the PEM Server](../02_pem_server_config/#pem_server_config) to use an SMTP server to deliver email before PEM can send email notifications. ![Create New Alert - Notification - Webhook tab](../../images/alerting_define_notification_webhook.png) -Use the fields in the `Webhook` tab to specify the webhook endpoints that will receive a notification if the alert is triggered at the specified level. Use the [Webhooks](../../04_toc_pem_features/09_pem_alerting/#pem_webhooks) tab to create an endpoint that contains the details of URL that will be notified when an alert is triggered along with other details like payload. To access the `Webhooks` tab, click the `Webhooks` icon located in `Quick Links` menu of the `Manage Alerts` tab. +Use the fields in the `Webhook` tab to specify the webhook endpoints that will receive a notification if the alert is triggered at the specified level. Use the [Webhooks](06_pem_webhooks/#pem_webhooks) tab to create an endpoint that contains the details of URL that will be notified when an alert is triggered along with other details like payload. To access the `Webhooks` tab, click the `Webhooks` icon located in `Quick Links` menu of the `Manage Alerts` tab. By default `Webhook` notifications will be sent to created endpoints according to their default settings. To disable the `Webhook` set the slider next to `Enable` field to `No`. @@ -73,15 +72,15 @@ Use the `Trap notification` options to configure trap notifications for this ale - Set the `SNMP Ver` to `v1`, `v2`, or `v3` to identify the SNMP version. - Use the `Low alert`, `Med alert` and `High alert` sliders to select the level(s) of alert that will trigger the trap. For example, if you set the slider next to `High alert` to `Yes`, PEM will send a notification when an alert with a high severity level is triggered. 
-Please note that you must [configure the PEM Server](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) to send notifications to an SNMP trap/notification receiver before notifications can be sent. For sending SNMP v3 traps, pemAgent will use 'User Security Model(USM)' which is in charge of authenticating, encrypting, and decrypting SNMP packets. +Please note that you must [configure the PEM Server](../02_pem_server_config/#pem_server_config) to send notifications to an SNMP trap/notification receiver before notifications can be sent. For sending SNMP v3 traps, pemAgent will use 'User Security Model(USM)' which is in charge of authenticating, encrypting, and decrypting SNMP packets. -Also note while sending SNMP v3 traps, agent will create snmp\_boot\_counter file. This file will get created in location mentioned by batch\_script\_dir parameter in agent.cfg, if this parameter is not configured or if directory is not accessible due to authentication restrictions then in operating systems temporary directory, if that is also not possible then in user’s home directory. +Also note that while sending SNMP v3 traps, the agent will create a snmp_boot_counter file. This file will be created in the location specified by the batch_script_dir parameter in agent.cfg; if this parameter is not configured, or if the directory is not accessible due to authentication restrictions, the file will be created in the operating system’s temporary directory, or, failing that, in the user’s home directory. -Please see How SNMP traps are formed? <snmp\_trap\_details> +Please see How SNMP traps are formed? <snmp_trap_details> ![Create New Alert - Notification - Nagios tab](../../images/alerting_define_notification_nagios.png) -Use the field in the `Nagios notification` box to instruct the PEM server to notify Nagios network-alerting software when the alert is triggered or cleared.
For detailed information about configuring and using Nagios with PEM, please see [Using PEM with Nagios](../../04_toc_pem_features/09_pem_alerting/#using_pem_with_nagios). +Use the field in the `Nagios notification` box to instruct the PEM server to notify Nagios network-alerting software when the alert is triggered or cleared. For detailed information about configuring and using Nagios with PEM, please see [Using PEM with Nagios](09_using_pem_with_nagios/#using_pem_with_nagios). - Set the `Submit passive service check result to Nagios` switch to `Yes` to instruct the PEM server to notify Nagios when the alert is triggered or cleared. @@ -131,7 +130,7 @@ Suppose you need to use the alert configuration placeholder values in an externa For eg: - > ``` bash + > ```bash > #!/bin/bash > > bash /script.sh "%AlertName% %AlertLevel% %AlertDetails%" @@ -141,7 +140,7 @@ Suppose you need to use the alert configuration placeholder values in an externa For eg: - > ``` bash + > ```bash > #!/bin/bash > > export AlertName=%AlertName% @@ -202,6 +201,6 @@ Fields on the `Notifications` tab instruct PEM to: - Execute the script shown in the `Code` field when the alert is triggered. > - To invoke a script on a Linux system, you must modify the entry for `batch_script_user` parameter of agent.cfg file and specify the user that should be used to run the script. You can either specify a non-root user or root for this parameter. If you do not specify a user, or the specified user does not exist, then the script will not be executed. Restart the agent after modifying the file. If pemagent is being run by a non-root user then the value of `batch_script_user` will be ignored and the script will be executed by the same non-root user that is being used for running the pemagent. -> - To invoke a script on a Windows system, set the registry entry for `AllowBatchJobSteps` to true and restart the PEM agent. 
PEM registry entries are located in HKEY\_LOCAL\_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. +> - To invoke a script on a Windows system, set the registry entry for `AllowBatchJobSteps` to true and restart the PEM agent. PEM registry entries are located in HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. Click the edit icon to close the editor and add the example to the `Alert List`; click the save icon before closing the `Manage Alerts` tab to save your work. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/02_pem_alert_copy.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/02_pem_alert_copy.mdx index 6c06bea3b62..5a19e5f0ac4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/02_pem_alert_copy.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/02_pem_alert_copy.mdx @@ -4,7 +4,6 @@ title: "Copy Alerts" - To speed up the deployment of alerts in the PEM system, you can copy alert definitions from one object to one or more target objects. To copy alerts from an object, highlight the object name (from which you will copy alerts) in the PEM client tree control, and select the `Manage Alerts...` option from the `Management` menu. When the `Manage Alerts` tab opens, click the `Copy Alerts` icon (located on the `Quick Links` toolbar) to open the `Copy Alert Configuration` dialog. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/03_pem_alert_templates.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/03_pem_alert_templates.mdx index 5b9c3d9b2cd..4f3cb9bbeee 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/03_pem_alert_templates.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/03_pem_alert_templates.mdx @@ -4,7 +4,6 @@ title: "Alert Templates" - An alert definition contains a system-defined or user-defined set of conditions that PEM compares to the system statistics; if the statistics deviate from the boundaries specified for that statistic, the alert triggers, and the PEM client displays a warning on the `Alerts Overview` page, and optionally sends a notification to a monitoring user. The table below lists the system-defined alert templates that you can use to create an alert; please note that this list is subject to change, and may vary by system. @@ -14,7 +13,7 @@ Within the table, the alerts are sorted by the target of the alert. The `Templat ## Templates applicable on Agent | Template Name | Description | Details | -|------------------------------------------------------------------------|-----------------------------------------------------------------------------|---------| +| ---------------------------------------------------------------------- | --------------------------------------------------------------------------- | ------- | | Load Average (1 minute) | 1-minute system load average. | | | Load Average (5 minutes) | 5-minute system load average. | | | Load Average (15 minutes) | 15-minute system load average. | | @@ -50,7 +49,7 @@ Within the table, the alerts are sorted by the target of the alert. 
The `Templat ## Templates applicable on Server | Template Name | Description | Details | -|-----------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|---------| +| --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | | Total table bloat in server | The total space wasted by tables in server, in MB. | | | Largest table (by multiple of unbloated size) | Largest table in server, calculated as a multiple of its own estimated unbloated size; exclude tables smaller than N MB. | | | Highest table bloat in server | The most space wasted by a table in server, in MB. | Yes | @@ -60,9 +59,9 @@ Within the table, the alerts are sorted by the target of the alert. The `Templat | Number of WAL files | Total number of Write Ahead Log files. | | | Number of prepared transactions | Number of transactions in prepared state. | | | Total connections | Total number of connections in the server. | Yes | -| Total connections as percentage of max\_connections | Total number of connections in the server as a percentage of maximum connections allowed on server, settings. | | -| Unused, non-superuser connections | Number of unused, non-superuser connections on the server, user\_info, settings. | | -| Unused, non-superuser connections as percentage of max\_connections | Number of unused, non-superuser connections on the server as a percentage of max\_connections, user\_info, settings. | | +| Total connections as percentage of max_connections | Total number of connections in the server as a percentage of maximum connections allowed on server, settings. 
| | +| Unused, non-superuser connections | Number of unused, non-superuser connections on the server, user_info, settings. | | +| Unused, non-superuser connections as percentage of max_connections | Number of unused, non-superuser connections on the server as a percentage of max_connections, user_info, settings. | | | Ungranted locks | Number of ungranted locks in server. | | | Percentage of buffers written by backends | The percentage of buffers written by backends vs. the total buffers written. | | | Percentage of buffers written by checkpoint | The percentage of buffers written by the checkpoints vs. the total buffers written. | | @@ -70,7 +69,7 @@ Within the table, the alerts are sorted by the target of the alert. The `Templat | Buffers allocated per second | Number of buffers allocated per second, over the last two probe cycles. | | | Connections in idle state | Number of connections in server that are in idle state. | Yes | | Connections in idle-in-transaction state | Number of connections in server that are in idle-in-transaction state. | Yes | -| Connections in idle-in-transaction state,as percentage of max\_connections | Number of connections in server that are in idle-in-transaction state, as a percentage of maximum connections allowed on server, settings | | +| Connections in idle-in-transaction state,as percentage of max_connections | Number of connections in server that are in idle-in-transaction state, as a percentage of maximum connections allowed on server, settings | | | Long-running idle connections | Number of connections in the server that have been idle for more than N seconds. | Yes | | Long-running idle connections and idle transactions | Number of connections in the server that have been idle or idle-in-transaction for more than N seconds. | Yes | | Long-running idle transactions | Number of connections in the server that have been idle in transaction for more than N seconds. 
| Yes | @@ -103,7 +102,7 @@ Within the table, the alerts are sorted by the target of the alert. The `Templat | Sequence Count | Total number of sequences in server. | | | A user expires in N days | Number of days before a user's validity expires. | | | Index size as a percentage of table size | Size of the indexes in server, as a percentage of their tables' size. | | -| Largest index by table-size percentage | Largest index in server, calculated as percentage of its table's size, oc\_index, table\_size. | | +| Largest index by table-size percentage | Largest index in server, calculated as percentage of its table's size, oc_index, table_size. | | | Number of ERRORS in the logfile on server M in the last X hours | The number of ERRORS in the logfile on server M in last X hours. | | | Number of WARNINGS in the logfile on server M in the last X hours | The number of WARNINGS in logfile on server M in the last X hours. | | | Number of WARNINGS or ERRORS in the logfile on server M in the last X hours | The number of WARNINGS or ERRORS in the logfile on server M in the last X hours. | | @@ -133,76 +132,76 @@ Within the table, the alerts are sorted by the target of the alert. The `Templat ## Templates applicable on Database -| Template Name | Description | Details | -|----------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|---------| -| Total table bloat in database | The total space wasted by tables in database, in MB. | | -| Largest table (by multiple of unbloated size) | Largest table in database, calculated as a multiple of its own estimated unbloated size; exclude tables smaller than N MB. | | -| Highest table bloat in database | The most space wasted by a table in database, in MB. | | -| Average table bloat in database | The average space wasted by tables in database, in MB. 
| | -| Table size in database | The size of tables in database, in MB. | Yes | -| Database size | The size of the database, in MB. | | -| Total connections | Total number of connections in the database. | Yes | -| Total connections as percentage of max\_connections | Total number of connections in the database as a percentage of maximum connections allowed on server, settings. | | -| Ungranted locks | Number of ungranted locks in database. | | -| Connections in idle state | Number of connections in database that are in idle state. | Yes | -| Connections in idle-in-transaction state | Number of connections in database that are in idle-in-transaction state. | Yes | -| Connections in idle-in-transaction state,as percentage of max\_connections | Number of connections in database that are in idle-in-transaction state, as a percentage of maximum connections allowed on server, settings. | | -| Long-running idle connections | Number of connections in the database that have been idle for more than N seconds. | Yes | -| Long-running idle connections and idle transactions | Number of connections in the database that have been idle or idle-in-transaction for more than N seconds. | Yes | -| Long-running idle transactions | Number of connections in the database that have been idle in transaction for more than N seconds. | Yes | -| Long-running transactions | Number of transactions in database that have been running for more than N seconds. | Yes | -| Long-running queries | Number of queries in database that have been running for more than N seconds. It does not include the long running vacuum or auto vacuum queries. | Yes | -| Long-running vacuums | Number of vacuum operations in database that have been running for more than N seconds. | Yes | -| Long-running autovacuums | Number of autovacuum operations in database that have been running for more than N seconds. | Yes | -| Committed transactions percentage | Percentage of transactions in the database that committed vs. 
that rolled-back over last N minutes. | | -| Shared buffers hit percentage | Percentage of block read requests in the database that were satisfied by shared buffers, over last N minutes. | | -| InfiniteCache buffers hit percentage | Percentage of block read requests in the database that were satisfied by InfiniteCache, over last N minutes. | | -| Tuples fetched | Tuples fetched from database over last N minutes. | | -| Tuples returned | Tuples returned from database over last N minutes. | | -| Tuples inserted | Tuples inserted into database over last N minutes. | | -| Tuples updated | Tuples updated in database over last N minutes. | | -| Tuples deleted | Tuples deleted from database over last N minutes. | | -| Tuples hot updated | Tuples hot updated in database, over last N minutes. | | -| Sequential Scans | Number of full table scans in database, over last N minutes. | | -| Index Scans | Number of index scans in database, over last N minutes. | | -| Hot update percentage | Percentage of hot updates in the database over last N minutes. | | -| Live Tuples | Number of estimated live tuples in database. | | -| Dead Tuples | Number of estimated dead tuples in database. | | -| Dead tuples percentage | Percentage of estimated dead tuples in database. | | -| Last Vacuum | Hours since last vacuum on the database. | | -| Last AutoVacuum | Hours since last autovacuum on the database. | | -| Last Analyze | Hours since last analyze on the database. | | -| Last AutoAnalyze | Hours since last autoanalyze on the database. | | -| Table Count | Total number of tables in database. | | -| Function Count | Total number of functions in database. | | -| Sequence Count | Total number of sequences in database. | | -| Index size as a percentage of table size | Size of the indexes in database, as a percentage of their tables' size. 
| | -| Largest index by table-size percentage | Largest index in database, calculated as percentage of its table's size, oc\_index, table\_size | | -| Database Frozen XID | The age (in transactions before the current transaction) of the database's frozen transaction ID. | | -| Number of attacks detected in the last N minutes | The number of SQL injection attacks occurred in the last N minutes. | | -| Number of attacks detected in the last N minutes by username | The number of SQL injection attacks occurred in the last N minutes by username. | | -| Queries that have been cancelled due to dropped tablespaces | Streaming Replication: number of queries that have been cancelled due to dropped tablespaces. | | -| Queries that have been cancelled due to lock timeouts | Streaming Replication: number of queries that have been cancelled due to lock timeouts. | | -| Queries that have been cancelled due to old snapshots | Streaming Replication: number of queries that have been cancelled due to old snapshots. | | -| Queries that have been cancelled due to pinned buffers | Streaming Replication: number of queries that have been cancelled due to pinned buffers. | | -| Queries that have been cancelled due to deadlocks | Streaming Replication: number of queries that have been cancelled due to deadlocks. | | -| Total events lagging in all slony clusters | Slony Replication: total events lagging in all slony clusters. | Yes | -| Events lagging in one slony cluster | Slony Replication: events lagging in one slony cluster. | | -| Lag time (minutes) in one slony cluster | Slony Replication: lag time (minutes) in one slony cluster. 
| | -| Total rows lagging in xdb single primary replication | xDB Replication: Total rows lagging in xdb single primary replication | Yes | -| Total rows lagging in xdb multi primary replication | xDB Replication: Total rows lagging in xdb multi primary replication | Yes | -| Total materialized view bloat in database | The total space wasted by materialized views in database, in MB. | | -| Largest materialized view (by multiple of unbloated size) | Largest materialized view in database, calculated as a multiple of its own estimated unbloated size; exclude materialized views smaller than N MB. | | -| Highest materialized view bloat in database | The most space wasted by a materialized view in database, in MB. | | -| Average materialized view bloat in database | The average space wasted by materialized views in database, in MB. | | -| Materialized view size in database | The size of materialized view in database, in MB. | | -| View Count | Total number of views in database. | | -| Materialized View Count | Total number of materialized views in database. | | +| Template Name | Description | Details | +| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| Total table bloat in database | The total space wasted by tables in database, in MB. | | +| Largest table (by multiple of unbloated size) | Largest table in database, calculated as a multiple of its own estimated unbloated size; exclude tables smaller than N MB. | | +| Highest table bloat in database | The most space wasted by a table in database, in MB. | | +| Average table bloat in database | The average space wasted by tables in database, in MB. | | +| Table size in database | The size of tables in database, in MB. | Yes | +| Database size | The size of the database, in MB. 
| | +| Total connections | Total number of connections in the database. | Yes | +| Total connections as percentage of max_connections | Total number of connections in the database as a percentage of maximum connections allowed on server, settings. | | +| Ungranted locks | Number of ungranted locks in database. | | +| Connections in idle state | Number of connections in database that are in idle state. | Yes | +| Connections in idle-in-transaction state | Number of connections in database that are in idle-in-transaction state. | Yes | +| Connections in idle-in-transaction state,as percentage of max_connections | Number of connections in database that are in idle-in-transaction state, as a percentage of maximum connections allowed on server, settings. | | +| Long-running idle connections | Number of connections in the database that have been idle for more than N seconds. | Yes | +| Long-running idle connections and idle transactions | Number of connections in the database that have been idle or idle-in-transaction for more than N seconds. | Yes | +| Long-running idle transactions | Number of connections in the database that have been idle in transaction for more than N seconds. | Yes | +| Long-running transactions | Number of transactions in database that have been running for more than N seconds. | Yes | +| Long-running queries | Number of queries in database that have been running for more than N seconds. It does not include the long running vacuum or auto vacuum queries. | Yes | +| Long-running vacuums | Number of vacuum operations in database that have been running for more than N seconds. | Yes | +| Long-running autovacuums | Number of autovacuum operations in database that have been running for more than N seconds. | Yes | +| Committed transactions percentage | Percentage of transactions in the database that committed vs. that rolled-back over last N minutes. 
| | +| Shared buffers hit percentage | Percentage of block read requests in the database that were satisfied by shared buffers, over last N minutes. | | +| InfiniteCache buffers hit percentage | Percentage of block read requests in the database that were satisfied by InfiniteCache, over last N minutes. | | +| Tuples fetched | Tuples fetched from database over last N minutes. | | +| Tuples returned | Tuples returned from database over last N minutes. | | +| Tuples inserted | Tuples inserted into database over last N minutes. | | +| Tuples updated | Tuples updated in database over last N minutes. | | +| Tuples deleted | Tuples deleted from database over last N minutes. | | +| Tuples hot updated | Tuples hot updated in database, over last N minutes. | | +| Sequential Scans | Number of full table scans in database, over last N minutes. | | +| Index Scans | Number of index scans in database, over last N minutes. | | +| Hot update percentage | Percentage of hot updates in the database over last N minutes. | | +| Live Tuples | Number of estimated live tuples in database. | | +| Dead Tuples | Number of estimated dead tuples in database. | | +| Dead tuples percentage | Percentage of estimated dead tuples in database. | | +| Last Vacuum | Hours since last vacuum on the database. | | +| Last AutoVacuum | Hours since last autovacuum on the database. | | +| Last Analyze | Hours since last analyze on the database. | | +| Last AutoAnalyze | Hours since last autoanalyze on the database. | | +| Table Count | Total number of tables in database. | | +| Function Count | Total number of functions in database. | | +| Sequence Count | Total number of sequences in database. | | +| Index size as a percentage of table size | Size of the indexes in database, as a percentage of their tables' size. 
| | +| Largest index by table-size percentage | Largest index in database, calculated as percentage of its table's size, oc_index, table_size | | +| Database Frozen XID | The age (in transactions before the current transaction) of the database's frozen transaction ID. | | +| Number of attacks detected in the last N minutes | The number of SQL injection attacks occurred in the last N minutes. | | +| Number of attacks detected in the last N minutes by username | The number of SQL injection attacks occurred in the last N minutes by username. | | +| Queries that have been cancelled due to dropped tablespaces | Streaming Replication: number of queries that have been cancelled due to dropped tablespaces. | | +| Queries that have been cancelled due to lock timeouts | Streaming Replication: number of queries that have been cancelled due to lock timeouts. | | +| Queries that have been cancelled due to old snapshots | Streaming Replication: number of queries that have been cancelled due to old snapshots. | | +| Queries that have been cancelled due to pinned buffers | Streaming Replication: number of queries that have been cancelled due to pinned buffers. | | +| Queries that have been cancelled due to deadlocks | Streaming Replication: number of queries that have been cancelled due to deadlocks. | | +| Total events lagging in all slony clusters | Slony Replication: total events lagging in all slony clusters. | Yes | +| Events lagging in one slony cluster | Slony Replication: events lagging in one slony cluster. | | +| Lag time (minutes) in one slony cluster | Slony Replication: lag time (minutes) in one slony cluster. 
| | +| Total rows lagging in xdb single primary replication | xDB Replication: Total rows lagging in xdb single primary replication | Yes | +| Total rows lagging in xdb multi primary replication | xDB Replication: Total rows lagging in xdb multi primary replication | Yes | +| Total materialized view bloat in database | The total space wasted by materialized views in database, in MB. | | +| Largest materialized view (by multiple of unbloated size) | Largest materialized view in database, calculated as a multiple of its own estimated unbloated size; exclude materialized views smaller than N MB. | | +| Highest materialized view bloat in database | The most space wasted by a materialized view in database, in MB. | | +| Average materialized view bloat in database | The average space wasted by materialized views in database, in MB. | | +| Materialized view size in database | The size of materialized view in database, in MB. | | +| View Count | Total number of views in database. | | +| Materialized View Count | Total number of materialized views in database. | | ## Templates applicable on Schema | Template Name | Description | Details | -|-----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------|---------| +| --------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ------- | | Total table bloat in schema | The total space wasted by tables in schema, in MB. | Yes | | Largest table (by multiple of unbloated size) | Largest table in schema, calculated as a multiple of its own estimated unbloated size; exclude tables smaller than N MB. | | | Highest table bloat in schema | The most space wasted by a table in schema, in MB. 
| | @@ -226,7 +225,7 @@ Within the table, the alerts are sorted by the target of the alert. The `Templat | Function Count | Total number of functions in schema. | | | Sequence Count | Total number of sequences in schema. | | | Index size as a percentage of table size | Size of the indexes in schema, as a percentage of their table's size. | | -| Largest index by table-size percentage | Largest index in schema, calculated as percentage of its table's size, oc\_index, table\_size | | +| Largest index by table-size percentage | Largest index in schema, calculated as percentage of its table's size, oc_index, table_size | | | Materialized View bloat | Space wasted by the materialized view, in MB. | | | Total materialized view bloat in schema | The total space wasted by materialized views in schema, in MB. | | | Materialized view size as a multiple of ubloated size | Size of the materialized view as a multiple of estimated unbloated size. | | @@ -242,7 +241,7 @@ Within the table, the alerts are sorted by the target of the alert. The `Templat ## Templates applicable on Table | Template Name | Description | Details | -|-------------------------------------------|------------------------------------------------------------------------------------------------|---------| +| ----------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | | Table bloat | Space wasted by the table, in MB. | | | Table size | The size of table, in MB. | | | Table size as a multiple of ubloated size | Size of the table as a multiple of estimated unbloated size. | | @@ -267,7 +266,7 @@ Within the table, the alerts are sorted by the target of the alert. 
The `Templat ## Global Templates | Template Name | Description | Details | -|---------------|-----------------------------------------------------|---------| +| ------------- | --------------------------------------------------- | ------- | | Agents Down | Number of agents that haven't reported in recently. | | | Servers Down | Number of servers that are currently inaccessible. | | | Alert Errors | Number of alerts in an error state. | | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/04_pem_custom_alert_templates.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/04_pem_custom_alert_templates.mdx index 8b33e5e307e..398a61a0131 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/04_pem_custom_alert_templates.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/04_pem_custom_alert_templates.mdx @@ -4,8 +4,7 @@ title: "Custom Alert Templates" - -An alert template is a prototype that defines the properties of an [alert](../../04_toc_pem_features/09_pem_alerting/#pem_alerting_dialog). An alert instructs the server to compare the current state of the monitored object to a threshold (specified in the alert template) to determine if a situation exists that requires administrative attention. +An alert template is a prototype that defines the properties of an [alert](01_pem_alerting_dialog/#pem_alerting_dialog). An alert instructs the server to compare the current state of the monitored object to a threshold (specified in the alert template) to determine if a situation exists that requires administrative attention. You can use the `Alert Templates` tab to define a custom alert template or view the definitions of existing alert templates. 
To open the `Alert Template` tab, select the `Manage Alerts...` menu option from the `Management` menu; when the `Manage Alerts` tab opens, select `Alert Templates` from the `Quick Links` menu. @@ -41,11 +40,12 @@ Use fields on the `General` tab to specify general information about the templat - Use the `History retention` field to specify the number of days that the result of the alert execution will be stored on the PEM server. - Use the `Threshold unit` field to specify the unit type of the threshold value. - Use fields in the `Auto create` box to indicate if PEM should use the template to generate an automatic alert. If enabled, PEM will automatically create an alert when a new server or agent (as specified by the `Target type` drop-down listbox) is added, and delete that alert when the target object is dropped. + - Move the `Auto create?` slider to `Yes` to indicate that PEM should automatically create alerts based on the template. If you modify an existing alert template, changing the `Auto create?` slider from `No` to `Yes`, PEM will create alerts on the existing agents and servers. Please note that if you change the slider from `Yes` to `No`, the default threshold values in existing alerts will be erased, and cannot be recovered. - Use the `Operator` drop-down listbox to select the operator that PEM will use when evaluating the current system values. - > Select a greater-than sign (>) to indicate that the alert should be triggered when the system values are greater than the values entered in the `Threshold values` fields. + > Select a greater-than sign (>) to indicate that the alert should be triggered when the system values are greater than the values entered in the `Threshold values` fields. > > Select a less-than sign (<) to indicate that the alert should be triggered when the system values are less than the values entered in the `Threshold values` fields. 
@@ -78,17 +78,17 @@ To modify an existing parameter definition, highlight a parameter name in the li Use the `Code` field on the `SQL` tab to provide the text of the SQL query that the server will invoke when executing the alert. The SQL query will provide the result against which the threshold value is compared; if the alert result deviates from the specified threshold value, an alert will be raised. -Within the query, parameters defined on the `Parameters` tab should be referenced (sequentially) by the variable `param_x`, where `x` indicates the position of the parameter definition within the parameter list. For example, `param_1` refers to the first parameter in the parameter list, param\_2 refers to the second parameter in the parameter list, and so on. +Within the query, parameters defined on the `Parameters` tab should be referenced (sequentially) by the variable `param_x`, where `x` indicates the position of the parameter definition within the parameter list. For example, `param_1` refers to the first parameter in the parameter list, param_2 refers to the second parameter in the parameter list, and so on. 
The query can also include the following pre-defined variables: -| Variable Description | Variable Name | -|-----------------------------------------|---------------------| -| agent identifier | '${agent\_id}' | -| server identifier | '${server\_id}' | -| database name | '${database\_name}' | -| schema name | '${schema\_name}' | -| table, index, sequence or function name | '${object\_name}' | +| Variable Description | Variable Name | +| --------------------------------------- | ------------------ | +| agent identifier | '${agent_id}' | +| server identifier | '${server_id}' | +| database name | '${database_name}' | +| schema name | '${schema_name}' | +| table, index, sequence or function name | '${object_name}' | Please Note: If the specified query is dependent on one or more probes from different levels within the PEM hierarchy (server, database, schema, etc.), and a probe becomes disabled, any resulting alerts will be displayed as follows: @@ -99,7 +99,7 @@ Use the `Detailed Information SQL` field to provide a SQL query that will be inv After defining a new alert template, click the `Add/Change` button to save the definition and add the template to the `Alert Templates list`. Click `Cancel` to exit the `Alert Templates` dialog without saving changes. -After defining a template, you can use the [Manage Alerts](../../04_toc_pem_features/09_pem_alerting/#pem_alerting_dialog) tab to create and enable an alert based on the template. +After defining a template, you can use the [Manage Alerts](01_pem_alerting_dialog/#pem_alerting_dialog) tab to create and enable an alert based on the template. 
## Deleting an Alert Template diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/05_pem_email_groups.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/05_pem_email_groups.mdx index 8dbe732e434..d4a194273b1 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/05_pem_email_groups.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/05_pem_email_groups.mdx @@ -4,7 +4,6 @@ title: "Email Groups" - Postgres Enterprise Manager monitors your system for conditions that require user attention. You can use an email group to specify the email addresses of users that the server will notify if current values deviate from threshold values specified in an alert definition. An email group has the flexibility to notify multiple users, or target specific users during user-defined time periods. Please note that you must configure the PEM Server to use an SMTP server to deliver email before PEM can send email notifications. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/06_pem_webhooks.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/06_pem_webhooks.mdx index 0a79bc6be74..67fe1f97c3c 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/06_pem_webhooks.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/06_pem_webhooks.mdx @@ -4,7 +4,6 @@ title: "Webhooks" - Postgres Enterprise Manager monitors your system for conditions that require user attention. You can use a webhook to create the endpoints that will receive a notification if current values deviate from threshold values specified in an alert definition. PEM sends a notification to multiple webhook endpoints, or to specific target webhook endpoints based on the events triggered. 
Please note that you must configure the PEM Server to use webhooks to receive notification of alert events on threshold value violations in your configured applications. @@ -42,10 +41,10 @@ The above `Enable?` setting will work only if `enable_webhook` parameter is set ## Defining a Webhook SSL configurations -You can define the Webhook SSL parameters in the respective agent configuration file or registry in windows. You can find the list of Webhook SSL parameters in PEM Agent Configuration Parameters <pem\_agent\_config\_params> section. If you add or remove any of the agent configuration parameters, you must restart the agent to apply them. +You can define the Webhook SSL parameters in the respective agent configuration file or registry in Windows. You can find the list of Webhook SSL parameters in the PEM Agent Configuration Parameters <pem_agent_config_params> section. If you add or remove any of the agent configuration parameters, you must restart the agent to apply them. -> - On 32 bit Windows systems, PEM registry entries for Webhooks are located in HKEY\_LOCAL\_MACHINE\\Software\\EnterpriseDB\\PEM\\agent\\WEBHOOK -> - On 64 bit Windows systems, PEM registry entries for Webhooks are located in HKEY\_LOCAL\_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent\\WEBHOOK +> - On 32 bit Windows systems, PEM registry entries for Webhooks are located in HKEY_LOCAL_MACHINE\\Software\\EnterpriseDB\\PEM\\agent\\WEBHOOK +> - On 64 bit Windows systems, PEM registry entries for Webhooks are located in HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent\\WEBHOOK > - On Linux systems, PEM configuration options for Webhooks are stored in the agent.cfg file, located (by default) in /usr/edb/pem/agent/etc ![Example - Webhook SSL Parameters in agent.cfg file](../../images/webhook_ssl_config.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/07_snmp_mib_generation.mdx
b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/07_snmp_mib_generation.mdx index f33c63ae45e..39e2e88278f 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/07_snmp_mib_generation.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/07_snmp_mib_generation.mdx @@ -4,7 +4,8 @@ title: "SNMP MIB Generation" - PEM allows alerts to be sent as SNMP traps or notifications to receivers such as network monitoring tools. To enable such tools to understand these notifications, a MIB file may be generated that describes the different alerts and accompanying information that PEM may send. The `pem.generate_alert_mib()` SQL function in the PEM database may be used to generate the MIB file from the alert templates defined in the database. For example: - psql.exe -U postgres -d pem -A -t -c "SELECT pem.generate_alert_mib();" > PEM-ALERTING-MIB +``` +psql.exe -U postgres -d pem -A -t -c "SELECT pem.generate_alert_mib();" > PEM-ALERTING-MIB +``` diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/08_snmp_trap_details.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/08_snmp_trap_details.mdx index 3c076bbecc7..e664cdb0e03 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/08_snmp_trap_details.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/08_snmp_trap_details.mdx @@ -4,11 +4,10 @@ title: "SNMP Trap Details" - Every SNMP trap send by PEM starts with oid .1.3.6.1.4.1.27645.5444, Significance of each identifier in oid is as follow’s. 
| Identifier | Meaning | -|------------|---------------------------------------------------------------------------------| +| ---------- | ------------------------------------------------------------------------------- | | 1 | ISO, ISO is the group that established the OID standard | | 3 | org, Organization identification schemes registered according to ISO/IEC 6523-2 | | 6 | dod, United States Department of Defense (DoD) | @@ -20,12 +19,12 @@ Every SNMP trap send by PEM starts with oid .1.3.6.1.4.1.27645.5444, Significanc ## How OID's are formed? -PEM’s SNMP trap has following oid format 1.3.6.1.4.1.27645.5444.<alert\_target\_level\_identifier>.<alert\_identifier> +PEM’s SNMP trap has following oid format 1.3.6.1.4.1.27645.5444.<alert_target_level_identifier>.<alert_identifier> -Following table lists down possible values for <alert\_target\_level\_identifier>. +Following table lists down possible values for <alert_target_level_identifier>. | Identifier | Alert Target Level | -|------------|--------------------| +| ---------- | ------------------ | | 1 | Agent | | 2 | Server | | 3 | Database | @@ -38,14 +37,14 @@ Following table lists down possible values for <alert <alert\_identifier> is unique identifier for each alert, which you can find in snmp\_oid column of pem.alert\_template table. -For example, snmp\_oid for Agent Down alert template is 34, hence trapOID for agent down alert will be 1.3.6.1.4.1.27645.5444.1.34 +For example, snmp_oid for Agent Down alert template is 34, hence trapOID for agent down alert will be 1.3.6.1.4.1.27645.5444.1.34 ## How OID's for binding variables are formed? 
-Every binding variable oid has following format 1.3.6.1.4.1.27645.5444.10.<binding\_variable\_identifier>, where 10 is identifier for binding variable Following table lists down possible values for <binding\_variable\_identifier> +Every binding variable oid has following format 1.3.6.1.4.1.27645.5444.10.<binding_variable_identifier>, where 10 is identifier for binding variable Following table lists down possible values for <binding_variable_identifier> | Identifier | Variable Name | -|------------|---------------------| +| ---------- | ------------------- | | 1 | alertName | | 2 | agentID | | 3 | serverID | @@ -65,15 +64,17 @@ Every binding variable oid has following format 1.3.6.1.4.1.27645.5444.10.<bi For example, 1.3.6.1.4.1.27645.5444.10.1 is oid for binding variable alertName. -Details of each snmp traps in pem.snmp\_spool table. For example, - - pem=# select * from pem.snmp_spool; - -[ RECORD 1 ]----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - id | 1 - trap_oid | .1.3.6.1.4.1.27645.5444.1.34 - enterprise_oid | .1.3.6.1.4.1.27645.5444 - trap_version | 2 - varbinding_oid | .1.3.6.1.4.1.27645.5444.10.1|.1.3.6.1.4.1.27645.5444.10.2|.1.3.6.1.4.1.27645.5444.10.4|.1.3.6.1.4.1.27645.5444.10.9|.1.3.6.1.4.1.27645.5444.10.10|.1.3.6.1.4.1.27645.5444.10.11|.1.3.6.1.4.1.27645.5444.10.12|.1.3.6.1.4.1.27645.5444.10.13|.1.3.6.1.4.1.27645.5444.10.14 - varbinding_value | Agent Down||Postgres Enterprise Manager Host|{0.1,0.2,0.3}|0|1|CLEAR|HIGH|2020-06-22 15:51:03.266437+10 - sent_status | s - recorded_time | 22-JUN-20 15:51:03.266437 +10:00 +Details of each snmp traps in pem.snmp_spool table. 
For example, + +``` +pem=# select * from pem.snmp_spool; +-[ RECORD 1 ]----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +id | 1 +trap_oid | .1.3.6.1.4.1.27645.5444.1.34 +enterprise_oid | .1.3.6.1.4.1.27645.5444 +trap_version | 2 +varbinding_oid | .1.3.6.1.4.1.27645.5444.10.1|.1.3.6.1.4.1.27645.5444.10.2|.1.3.6.1.4.1.27645.5444.10.4|.1.3.6.1.4.1.27645.5444.10.9|.1.3.6.1.4.1.27645.5444.10.10|.1.3.6.1.4.1.27645.5444.10.11|.1.3.6.1.4.1.27645.5444.10.12|.1.3.6.1.4.1.27645.5444.10.13|.1.3.6.1.4.1.27645.5444.10.14 +varbinding_value | Agent Down||Postgres Enterprise Manager Host|{0.1,0.2,0.3}|0|1|CLEAR|HIGH|2020-06-22 15:51:03.266437+10 +sent_status | s +recorded_time | 22-JUN-20 15:51:03.266437 +10:00 +``` diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/09_using_pem_with_nagios.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/09_using_pem_with_nagios.mdx index 90c67000be5..fa93d96c1b3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/09_using_pem_with_nagios.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/09_using_pem_with_nagios.mdx @@ -4,7 +4,6 @@ title: "Using PEM with Nagios" - The PEM server can send a passive alert result to Nagios network-alerting software when an alert is triggered. To instruct the PEM server to notify Nagios of a triggered alert, you must: - Enable Nagios notification for each alert that will trigger a notification from the PEM server to Nagios. Please note that PEM alerting must be configured before you create the host.cfg file and services.cfg file. @@ -17,7 +16,7 @@ Detailed information about each configuration step is listed below. 
After configuring the server to enable Nagios alerting, any triggered alerts will send a passive check result to the Nagios service. The syntax of a passive alert is: -> \[*timestamp*\] PROCESS\_SERVICE\_CHECK\_RESULT; `host_name` ; `service_name` ; `service_status` ; +> \[*timestamp*] PROCESS_SERVICE_CHECK_RESULT; `host_name` ; `service_name` ; `service_status` ; Where: @@ -40,7 +39,7 @@ Where: ## Enabling Nagios Notification for an Alert -The PEM server maintains a unique set of notification properties for each enabled alert. Use the `Notification` tab of the `` `Manage Alerts `` <pem\_alerting\_dialog> tab to specify that (when triggered), a given alert will send an alert notice to Nagios. To modify the notification properties of an alert, right-click on the name of the object monitored by the alert, and select Manage Alerts... from the Management menu. When the Manage Alerts tab opens, locate the alert, and then click the edit button to the left of the alert name in the Alerts list. When the Manage Alerts tab opens, select the Notification\` tab. +The PEM server maintains a unique set of notification properties for each enabled alert. Use the `Notification` tab of the `Manage Alerts` <pem_alerting_dialog> tab to specify that (when triggered), a given alert will send an alert notice to Nagios. To modify the notification properties of an alert, right-click on the name of the object monitored by the alert, and select Manage Alerts... from the Management menu. When the Manage Alerts tab opens, locate the alert, and then click the edit button to the left of the alert name in the Alerts list, and select the `Notification` tab.
![Nagios Alert - Notification tab](../../images/nagios_alert_notification.png) @@ -48,7 +47,7 @@ To enable Nagios notification, move the slider next to `Submit passive service c ## Configuring Nagios-related behavior of the PEM Server -You can use the `Server Configuration` dialog to provide information about your Nagios configuration to the PEM server. To open the [Server Configuration](../../04_toc_pem_features/02_pem_server_config/#pem_server_config) dialog, select `Server Configuration...` from the PEM client's `Management` menu. +You can use the `Server Configuration` dialog to provide information about your Nagios configuration to the PEM server. To open the [Server Configuration](../02_pem_server_config/#pem_server_config) dialog, select `Server Configuration...` from the PEM client's `Management` menu. ![Nagios Server Configuration options](../../images/nagios_server_configuration.png) @@ -71,11 +70,11 @@ In most cases (when PEM is installed in a default configuration), you will not b Before modifying the Nagios configuration file, use the following command to create a hosts.cfg file that contains information about the PEM hosts that reside on the local system: -> ./psql -U postgres -p 5433 -d pem -A -t -c "select pem.create\_nagios\_host\_config('generic-host')" > /usr/local/nagios/etc/objects/hosts.cfg +> ./psql -U postgres -p 5433 -d pem -A -t -c "select pem.create_nagios_host_config('generic-host')" > /usr/local/nagios/etc/objects/hosts.cfg Then, use the following command to create a `services.cfg` file that contains information about the PEM services that reside on the local system: -> ./psql -U postgres -p 5433 -d pem -A -t -c "select pem.create\_nagios\_service\_config('generic-service')" > /usr/local/nagios/etc/objects/services.cfg +> ./psql -U postgres -p 5433 -d pem -A -t -c "select pem.create_nagios_service_config('generic-service')" > /usr/local/nagios/etc/objects/services.cfg If you wish to use a custom template.cfg file entry, specify the 
entry name in place of generic-host or generic-service in the above commands. @@ -83,7 +82,7 @@ If you wish to use a custom template.cfg file entry, specify the entry name in p After creating the host.cfg and services.cfg files, you must specify their location in the Nagios configuration file (by default, /usr/local/nagios/etc/nagios.cfg). Modify the configuration file, adding entries that specify the location of the files: -> cfg\_file=/usr/local/etc/objects/hosts.cfg cfg\_file=/usr/local/etc/objects/services.cfg +> cfg_file=/usr/local/etc/objects/hosts.cfg cfg_file=/usr/local/etc/objects/services.cfg You can use the following command to confirm that Nagios is properly configured: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/index.mdx index e87d094bc61..8e39ec0977d 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/09_pem_alerting/index.mdx @@ -4,8 +4,7 @@ title: "Alerting" - -Postgres Enterprise Manager monitors a system for conditions that require user attention. An alert definition contains a system-defined or user-defined set of conditions that PEM compares to the system statistics; if the statistics deviate from the boundaries specified for that statistic, the alert triggers, displaying a `High` (red), `Low` (yellow) or `Medium` (orange) severity warning in the left-most column of the `Alerts Status` table on the `Global Overview` dashboard, and optionally sends a notification via email to [Email Groups](../04_toc_pem_features/09_pem_alerting/#pem_email_groups) or SNMP trap/notification receivers <snmp\_mib\_generation>. +Postgres Enterprise Manager monitors a system for conditions that require user attention. 
An alert definition contains a system-defined or user-defined set of conditions that PEM compares to the system statistics; if the statistics deviate from the boundaries specified for that statistic, the alert triggers, displaying a `High` (red), `Low` (yellow) or `Medium` (orange) severity warning in the left-most column of the `Alerts Status` table on the `Global Overview` dashboard, and optionally sends a notification via email to [Email Groups](05_pem_email_groups/#pem_email_groups) or [SNMP trap/notification receivers](07_snmp_mib_generation/#snmp_mib_generation). ![Alerts Status table](../../images/alerts_status_table.png) @@ -17,7 +16,7 @@ PEM also provides an interface that allows you to create customized alerts. Each ## Using the Alerts Dashboard -Use the `Dashboards` menu (at the top of the `Global Overview` dashboard) to access the [Alerts Dashboard](../04_toc_pem_features/01_dashboards/#alerts_dashboard). The Alerts Dashboard displays a summary of the active alerts and the status of each alert: +Use the `Dashboards` menu (at the top of the `Global Overview` dashboard) to access the [Alerts Dashboard](../01_dashboards/01_alerts_dashboard/#alerts_dashboard). The Alerts Dashboard displays a summary of the active alerts and the status of each alert: ![Alerts dashboard](../../images/alerts_dashboard.png) @@ -29,26 +28,26 @@ The `Alert Errors` table displays configuration-related errors (eg. accidentally ## Managing Alerts -PEM's `Manage Alerts` tab allows you to define custom alerts or modify existing alerts. To open the [Manage Alerts tab](../04_toc_pem_features/09_pem_alerting/#pem_alerting_dialog), select `Manage Alerts...` from the `Management` menu. The Manage Alerts tab provides an easy way to review the alerts that are currently defined for the object that is highlighted in the PEM client tree control; simply select an object to see the alerts that are defined for that object. +PEM's `Manage Alerts` tab allows you to define custom alerts or modify existing alerts.
To open the [Manage Alerts tab](01_pem_alerting_dialog/#pem_alerting_dialog), select `Manage Alerts...` from the `Management` menu. The Manage Alerts tab provides an easy way to review the alerts that are currently defined for the object that is highlighted in the PEM client tree control; simply select an object to see the alerts that are defined for that object. ![Manage Alerts tab](../../images/alerting_manage_alerts.png) The `Manage Alerts` tab also provides `Quick Links` that provide quick access to dialogs that allow you to: -> - [Copy an alert](../04_toc_pem_features/09_pem_alerting/#pem_alert_copy) from one object to one or more objects. -> - [Create or modify an alert template](../04_toc_pem_features/09_pem_alerting/#pem_custom_alert_templates). -> - [Create or Modify an email group](../04_toc_pem_features/09_pem_alerting/#pem_email_groups). -> - Manage [PEM Server configuration](../04_toc_pem_features/09_pem_alerting/#pem_email_groups) details. +> - [Copy an alert](02_pem_alert_copy/#pem_alert_copy) from one object to one or more objects. +> - [Create or modify an alert template](04_pem_custom_alert_templates/#pem_custom_alert_templates). +> - [Create or Modify an email group](05_pem_email_groups/#pem_email_groups). +> - Manage [PEM Server configuration](05_pem_email_groups/#pem_email_groups) details. > - Access the PEM online help. -You can configure an alert to notify Nagios network-alerting software when that alert is triggered. For more information, see [Using PEM with Nagios](../04_toc_pem_features/09_pem_alerting/#using_pem_with_nagios). +You can configure an alert to notify Nagios network-alerting software when that alert is triggered. For more information, see [Using PEM with Nagios](09_using_pem_with_nagios/#using_pem_with_nagios). -To [create a new alert](../04_toc_pem_features/09_pem_alerting/#pem_alerting_dialog), click the add icon in the upper-right corner of the `Alerts` table. 
+To [create a new alert](01_pem_alerting_dialog/#pem_alerting_dialog), click the add icon in the upper-right corner of the `Alerts` table. Contents:
-pem\_alerting\_dialog pem\_alert\_copy pem\_alert\_templates pem\_custom\_alert\_templates pem\_email\_groups pem\_webhooks snmp\_mib\_generation snmp\_trap\_details using\_pem\_with\_nagios +pem_alerting_dialog pem_alert_copy pem_alert_templates pem_custom_alert_templates pem_email_groups pem_webhooks snmp_mib_generation snmp_trap_details using_pem_with_nagios
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/01_pem_create_new_chart.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/01_pem_create_new_chart.mdx index b2b4283e253..2ca5c664476 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/01_pem_create_new_chart.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/01_pem_create_new_chart.mdx @@ -4,7 +4,6 @@ title: "Creating a New Chart" - Click the `Create New Chart` icon in the `Quick Links` section of the `Manage Charts` tab to open the `Create Chart` wizard. The `Create Chart` wizard will walk you through the steps required to define a new chart. ![Create New Chart Wizard - Configure Chart page](../../images/pem_create_new_chart_conf_chart.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/02_pem_manage_charts_template.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/02_pem_manage_charts_template.mdx index de458f29450..c890bae1379 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/02_pem_manage_charts_template.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/02_pem_manage_charts_template.mdx @@ -4,7 +4,6 @@ title: "Importing a Capacity Manager Template" - Selecting the `Import Capacity Manager Template` from the `Manage Charts` tab's `Quick Links` section opens the `Create Chart` dialog, allowing you to select from your saved Capacity Manager templates. When the dialog opens, use the `Import capacity template` drop-down listbox to select the template you would like to use for your chart. 
![Manage Charts Imported Template page](../../images/pem_manage_charts_imported_template.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/index.mdx index 03403d38846..c4b83ef369f 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/10_pem_manage_charts/index.mdx @@ -4,15 +4,14 @@ title: "Using the Manage Charts Tab" - You can use the `Manage Charts` tab to access dialogs that allow you to create or modify a custom line chart or table, or import a Capacity Manager template for use in a custom chart. After defining a chart, you can display the chart on a custom dashboard. To open the `Manage Charts` tab, select `Manage Charts...` from the PEM client `Management` menu. ![Manage Charts tab](../../images/pem_manage_charts.png) The `Manage Charts` tab provides a `Quick Links` menu that allows you to access dialogs to: -> - [Create a New Chart](../04_toc_pem_features/10_pem_manage_charts/#pem_create_new_chart) for use on a custom dashboard. -> - [Import a Capacity Manager template](../04_toc_pem_features/10_pem_manage_charts/#pem_manage_charts_template) to use as a template for creating a custom chart. +> - [Create a New Chart](01_pem_create_new_chart/#pem_create_new_chart) for use on a custom dashboard. +> - [Import a Capacity Manager template](02_pem_manage_charts_template/#pem_manage_charts_template) to use as a template for creating a custom chart. The `Custom Charts` table displays a list of user-defined charts; when a chart is newly added, the font displays in green. When you add an additional chart or refresh the screen, the name of the chart is displayed in black. @@ -34,6 +33,6 @@ Contents:
-pem\_create\_new\_chart pem\_manage\_charts\_template +pem_create_new_chart pem_manage_charts_template
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/01_pem_custom_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/01_pem_custom_dashboard.mdx index 1ce150128d1..31be42ef4ba 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/01_pem_custom_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/01_pem_custom_dashboard.mdx @@ -4,7 +4,6 @@ title: "Creating a Custom Dashboard" - You can use the PEM dashboard editor to create or modify a user-defined dashboard. The custom dashboard may include pre-defined charts, user-defined charts or a mix of pre-defined and user-defined charts. To create a new dashboard, select `Create New Dashboard...` from the `Quick Links` section of the `Manage Dashboards` tab. ![Create Custom Dashboard - Configure option](../../images/pem_custom_dashboard_configure_dashboard.png) @@ -17,7 +16,7 @@ Use the fields in the `Configure` section to specify general information about t Provide information in the fields in the `Ops dashboard options` box if the dashboard will be used as an Ops dashboard: -- Set the `Ops Dashboard?` field to `Yes` to instruct the server to create a dashboard that is formatted for display on an [Ops monitor](../../04_toc_pem_features/11_pem_manage_dashboards/#pem_ops_dashboard). +- Set the `Ops Dashboard?` field to `Yes` to instruct the server to create a dashboard that is formatted for display on an [Ops monitor](02_pem_ops_dashboard/#pem_ops_dashboard). - Set the `Show Title?` field to `Yes` to display the dashboard name at the top of the Ops dashboard. - Use the `Font` drop-down list box to select a custom font style for the title. The selected font style will be displayed in the `Preview` box. - Use the `Font size` drop-down list box to select a custom font size for the title. 
The selected font style will be displayed in the `Preview` box. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/02_pem_ops_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/02_pem_ops_dashboard.mdx index 488aa63ded5..f951fd1f214 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/02_pem_ops_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/02_pem_ops_dashboard.mdx @@ -4,8 +4,7 @@ title: "Creating an Ops Dashboard" - -You can use the PEM [dashboard editor](../../04_toc_pem_features/11_pem_manage_dashboards/#pem_custom_dashboard) to create a custom dashboard formatted for display on an Ops monitor. An Ops dashboard displays the specified charts and graphs, while omitting header information and minimizing extra banners, titles, and borders. +You can use the PEM [dashboard editor](01_pem_custom_dashboard/#pem_custom_dashboard) to create a custom dashboard formatted for display on an Ops monitor. An Ops dashboard displays the specified charts and graphs, while omitting header information and minimizing extra banners, titles, and borders. 
To create an Ops dashboard, provide detailed information about the Ops display in the `Ops dashboard options` section of the `Create Dashboard` dialog: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/index.mdx index e16a4bb37a2..66ae2bbc8b6 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/11_pem_manage_dashboards/index.mdx @@ -4,7 +4,6 @@ title: "The PEM Manage Dashboards Tab" - PEM displays performance statistics through a number of system-defined dashboards; each dashboard contains a series of summary views that contain charts, graphs and tables that display statistics related to the selected object. You can use the Manage Dashboards tab to create and manage custom dashboards that display the information that is most relevant to your system. ![Manage Dashboard tab](../../images/pem_manage_dashboards_tab.png) @@ -15,6 +14,6 @@ Contents:
-pem\_custom\_dashboard pem\_ops\_dashboard +pem_custom_dashboard pem_ops_dashboard
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/01_pem_custom_probes.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/01_pem_custom_probes.mdx index 92832965e3e..c5d7e7f5fe9 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/01_pem_custom_probes.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/01_pem_custom_probes.mdx @@ -4,7 +4,6 @@ title: "Custom Probes" - Use the `Custom Probes` tab to create a new probe or modify an existing probe. After creating or modifying a probe, you can incorporate the data gathered by custom probes into existing or new charts or graphs. To open the `Custom Probes` tab, select the `Manage Custom Probes` icon from the `Quick Links` section of the `Manage Probes` tab. ![Custom Probes](../../images/custom_probes.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/02_copy_probe_config.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/02_copy_probe_config.mdx index 6f86646cbde..11addf2b934 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/02_copy_probe_config.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/02_copy_probe_config.mdx @@ -4,7 +4,6 @@ title: "Copy Probe Configuration" - You can use the `Copy Probe Configuration...` dialog to copy probe definitions from one monitored object to one or more monitored objects of the same type. To open the `Copy Probe Configuration...` dialog, highlight the object from which you are copying probes in the PEM client tree control, and select `Manage Probes...` from the `Management` menu. 
When the `Manage Probes` tab opens, click on `Copy Probe` to open the `Copy Probe Configuration` dialog: ![Copy Probe Configuration](../../images/copy_probe_config.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/01_pem_probes.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/01_pem_probes.mdx index 4128d30b707..f44f9244da6 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/01_pem_probes.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/01_pem_probes.mdx @@ -4,9 +4,59 @@ title: "Probes" - A `probe` is a scheduled task that retrieves information about the database objects that are being monitored by the PEM agent. PEM uses the collected information to build the graphs displayed on each homepage. The `Manage Probes` tab (accessed via the `Management` menu) allows you to modify the data collection schedule and the length of time that PEM will retain information returned by a specific probe. Unless otherwise noted, Postgres Enterprise Manager enables the probes listed in the table below: -
Probe NameInformation Monitored by ProbeProbe Configuration Level
Background Writer Statistics

This probe monitors information about the background writer. The information includes:

  • The number of timed checkpoints
  • The number of requested checkpoints
  • The number of buffers written (by checkpoint)
  • The number of buffers written (by background writer)
  • The number of background writer cycles
  • The number of background buffers written
  • The number of buffers allocated
Server
Blocked Session InformationThis probe returns information about blocked sessions.Server
CPU UsageThis probe monitors CPU Usage information.Agent
Data and Log File Analysis

This probe monitors information about log files. The information includes:

  • The name of the log file
  • The directory in which the log file resides
Server
Database Frozen XIDThis probe monitors the frozen XID of each database.Server
Database Size

This probe monitors information about the size of the monitored databases. The information includes:

  • The time the information was gathered
  • The database name
  • The database size (in MB's)
Server
Database Statistics

This probe monitors database statistics. The information includes:

  • The number of backends
  • The number of transactions committed
  • The number of transactions rolled back
  • The number of blocks read
  • The number of blocks hit
  • The number of rows returned
  • The number of rows fetched
  • The number of rows inserted
  • The number of rows updated
  • The number of rows deleted
Server
Disk Busy Info

This probe monitors information about disk activity.

  • Note: This probe is not supported on Mac OS X, Solaris or HP-UX
Agent
Disk Space

This probe monitors information about disk space usage. The information includes:

  • The amount of disk space used
  • The amount of disk space available
Agent
EDB Audit ConfigurationThis probe monitors the audit logging configuration of Postgres Plus Advanced Servers.Server
Failover Manager Cluster InfoThis probe monitors a Failover Manager cluster, returning information about the cluster. This probe is disabled unless a cluster name and path of the Failover Manager binary is provided on the Server Properties dialog.Server
Failover Manager Node StatusThis probe monitors a Failover Manager cluster, returning detailed about each node within the cluster. This probe is disabled unless a cluster name and path of the Failover Manager binary is provided on the Server Properties dialog.Server
Function Statistics

This probe monitors a database, retrieving information about functions. The information includes:

  • Function names
  • Argument types
  • Return values
Database
Index Size

This probe monitors a database, retrieving information about indexes. The information includes:

  • The name of the index
  • The time the data was gathered
  • The size of the index (in MB's)
Database
Index Statistics

This probe monitors index statistics. The information includes:

  • The number of index scans
  • The number of rows read
  • The number of rows fetched
  • The number of blocks read
  • The number of blocks hit
Database
Installed Packages

This probe monitors the packages that are currently installed. The information gathered includes:

  • The name of the installed package
  • The version of the installed package
  • The date and time that the probe executed

Agent

IO Analysis

This probe monitors disk I/O information. The information includes:

  • The number of blocks read
  • The number of blocks written
  • The date and time that the probe executed
  • Note: This probe is not supported on Mac OS X

Agent

Load Average

This probe monitors CPU load averages. The information includes:

  • The 1-minute load average
  • The 5-minute load average
  • The 15-minute load average
  • Note: This probe is not supported on Windows
Agent
Lock Information

This probe monitors lock information. The information includes:

  • The database name
  • The lock type
  • The lock mode
  • The process holding the lock
Server
Memory Usage

This probe monitors information about system memory usage. The information includes:

  • Total RAM in MB

  • Free RAM in MB

  • Total swap memory in MB

  • Free swap memory in MB

  • Shared system memory in MB (It is used by tuning wizard to tune the memory parameters for the database server)

    • On non-windows system, it is shmmax value and read from /proc/sys/kernel/shmmax
  • On Windows, it is the same as total memory.
Agent
Network Statistics

This probe monitors network statistics. The information includes:

  • The interface IP address
  • The number of packets sent
  • The number of packets received
  • The number of bytes sent
  • The number of bytes received
  • The link speed (in MB/second)
Agent
Number of Prepared TransactionsThis probe stores the number of prepared transactions.Server
Number of WAL FilesThis probe monitors the number of WAL files.Server
Object Catalog: Database

This probe monitors a list of databases and their properties. The information includes:

  • The database name
  • The database encoding type
  • If the database allows user connections or system connections
Server
Object Catalog: Foreign Key

This probe monitors a list of foreign keys and their properties. The information includes:

  • The name of the table that contains the foreign key
  • The name of the table that the foreign key references
  • The name of the database in which the table resides
  • The name of the schema in which the table resides
Schema
Object Catalog: Function

This probe monitors a list of functions and their properties. The information includes:

  • The name of the function
  • The name of the schema in which the function resides
  • The name of the database in which the function resides
Schema
Object Catalog: Index

This probe monitors a list of indexes and their properties. The information includes:

  • The name of the index
  • The name of the table that the index is associated with
  • The name of the database in which the indexed table resides
Schema
Object Catalog: SchemaThis probe monitors a list of schemas and their associated databases and servers.Database
Object Catalog: SequenceThis probe monitors a list of sequences and their properties.Schema
Object Catalog: Table

This probe monitors a list of table information. The information includes:

  • The table name
  • The name of the schema in which the table resides
  • The name of the database in which the schema resides
  • A Boolean indicator that indicates if the table has a primary key
Schema
Object Catalog: TablespaceThis probe monitors a list of tablespaces.Server
Operating System InformationThis probe monitors the operating system details and boot time.Agent
Package Catalog

This probe monitors the packages that are currently available for installation. The information gathered includes:

  • The package name
  • The package version
Agent
PG HBA ConfThis probe monitors authentication configuration information from the pg_hba.conf file.Server
Server InformationThis probe monitors information about servers.Server
Session Information

This probe monitors session information. The information includes:

  • The name of the session user
  • The date and time that the session connected to the server
  • The status of the session at the time that the information was gathered (idle, waiting, etc)
  • The client address and port number
Server
SettingsThis probe monitors the values currently assigned to GUC variables.Server
SQL ProtectThis probe monitors a server, retrieving information about SQL injection attacks.Server
Slony ReplicationThis probe monitors lag data for clusters replicated using Slony.Database
Streaming Replication

This probe monitors a cluster that is using streaming replication, retrieving information about:

  • The sent Xlog location (in bytes)
  • The write Xlog location (in bytes)
  • The flush Xlog location (in bytes)
  • The replay Xlog location (in bytes)
  • The Xlog lag (in segments)
  • The Xlog lag (in pages)
Server
Streaming Replication Lag Time

This probe monitors a cluster that is using streaming replication, retrieving lag information about:

  • Replication lag time (in seconds)
  • Current status of replication (running/paused)
Server
Streaming Replication Database Conflicts

This probe monitors a database that is using streaming replication, retrieving information about any conflicts that arise. This includes information about queries that have been canceled due to:

  • The # of drop tablespace conflicts
  • The # of lock timeout conflicts
  • The # of old snapshot conflicts
  • The # of pinned buffer conflicts
  • The # of deadlock conflicts
Server
Table Bloat

This probe monitors information about the current table bloat. The information includes:

  • The name of the table
  • The name of the schema in which the table resides
  • The estimated number of pages
  • The estimated number of wasted pages
  • The estimated number of bytes per row
Database
Table Frozen XIDThis probe monitors the frozen XID of each table.Schema
Table Size

This probe monitors information about table size. The information includes:

  • Table size (in MB's)
  • Total index size (in MB's)
  • Total table size, with indexes and TOAST (in MB's)
Database
Table Statistics

This probe monitors table statistics. The information includes:

  • The number of sequential scans
  • The number of sequential scan rows
  • The number of index scans
  • The number of index scan rows
  • The number of rows inserted
  • The number of rows updated
  • The number of rows deleted
  • The number of live rows
  • The number of dead rows
  • The last VACUUM
  • The last auto-vacuum
  • The last ANALYZE
  • The last auto-analyze
  • The number of pages estimated by ANALYZE
  • The number of rows estimated by ANALYZE
Database
Tablespace SizeThis probe monitors a list of tablespaces and their sizes.Server
User Information

This probe monitors a list of the current users. The stored information includes:

  • The user name
  • The user type (superuser vs. non-superuser)
  • The server to which the user is connected
Server
WAL Archive Status

This probe monitors the status of the WAL archive. The stored information includes:

  • The # of WAL archives done
  • The # of WAL archives pending
  • The last archive time
  • The # of WAL archives failed
  • The time of the last failure
Server
xDB ReplicationThis probe monitors lag data for clusters replicated using xDB replication.Database
+| Probe Name | Information Monitored by Probe | Probe Configuration Level | +| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------- | +| Background Writer Statistics | This probe monitors information about the background writer. The information includes:
- The number of timed checkpoints
- The number of requested checkpoints
- The number of buffers written (by checkpoint)
- The number of buffers written (by background writer)
- The number of background writer cycles
- The number of background buffers written
- The number of buffers allocated
| Server | +| Blocked Session Information | This probe returns information about blocked sessions. | Server | +| CPU Usage | This probe monitors CPU Usage information. | Agent | +| Data and Log File Analysis | This probe monitors information about log files. The information includes:
- The name of the log file
- The directory in which the log file resides
| Server | +| Database Frozen XID | This probe monitors the frozen XID of each database. | Server | +| Database Size | This probe monitors information about the size of the monitored databases. The information includes:
- The time the information was gathered
- The database name
- The database size (in MB's)
| Server | +| Database Statistics | This probe monitors database statistics. The information includes:
- The number of backends
- The number of transactions committed
- The number of transactions rolled back
- The number of blocks read
- The number of blocks hit
- The number of rows returned
- The number of rows fetched
- The number of rows inserted
- The number of rows updated
- The number of rows deleted
| Server | +| Disk Busy Info | This probe monitors information about disk activity.
- **Note:** This probe is not supported on Mac OS X, Solaris or HP-UX
| Agent | +| Disk Space | This probe monitors information about disk space usage. The information includes:
- The amount of disk space used
- The amount of disk space available
| Agent | +| EDB Audit Configuration | This probe monitors the audit logging configuration of Postgres Plus Advanced Servers. | Server | +| Failover Manager Cluster Info | This probe monitors a Failover Manager cluster, returning information about the cluster. This probe is disabled unless a cluster name and path of the Failover Manager binary is provided on the Server Properties dialog. | Server | +| Failover Manager Node Status | This probe monitors a Failover Manager cluster, returning detailed information about each node within the cluster. This probe is disabled unless a cluster name and path of the Failover Manager binary is provided on the Server Properties dialog. | Server | +| Function Statistics | This probe monitors a database, retrieving information about functions. The information includes:
- Function names
- Argument types
- Return values
| Database | +| Index Size | This probe monitors a database, retrieving information about indexes. The information includes:
- The name of the index
- The time the data was gathered
- The size of the index (in MB's)
| Database | +| Index Statistics | This probe monitors index statistics. The information includes:
- The number of index scans
- The number of rows read
- The number of rows fetched
- The number of blocks read
- The number of blocks hit
| Database | +| Installed Packages | This probe monitors the packages that are currently installed. The information gathered includes:
- The name of the installed package
- The version of the installed package
- The date and time that the probe executed
|


Agent

| +| IO Analysis | This probe monitors disk I/O information. The information includes:
- The number of blocks read
- The number of blocks written
- The date and time that the probe executed
- **Note:** This probe is not supported on Mac OS X
|


Agent

| +| Load Average | This probe monitors CPU load averages. The information includes:
- The 1-minute load average
- The 5-minute load average
- The 15-minute load average
- **Note:** This probe is not supported on Windows
| Agent | +| Lock Information | This probe monitors lock information. The information includes:
- The database name
- The lock type
- The lock mode
- The process holding the lock
| Server | +| Memory Usage | This probe monitors information about system memory usage. The information includes:
- Total RAM in MB
- Free RAM in MB
- Total swap memory in MB
- Free swap memory in MB
- Shared system memory in MB (It is used by tuning wizard to tune the memory parameters for the database server)
- On non-windows system, it is `shmmax` value and read from `/proc/sys/kernel/shmmax`
- On Windows, it is the same as total memory.
| Agent | +| Network Statistics | This probe monitors network statistics. The information includes:
- The interface IP address
- The number of packets sent
- The number of packets received
- The number of bytes sent
- The number of bytes received
- The link speed (in MB/second)
| Agent | +| Number of Prepared Transactions | This probe stores the number of prepared transactions. | Server | +| Number of WAL Files | This probe monitors the number of WAL files. | Server | +| Object Catalog: Database | This probe monitors a list of databases and their properties. The information includes:
- The database name
- The database encoding type
- If the database allows user connections or system connections
| Server | +| Object Catalog: Foreign Key | This probe monitors a list of foreign keys and their properties. The information includes:
- The name of the table that contains the foreign key
- The name of the table that the foreign key references
- The name of the database in which the table resides
- The name of the schema in which the table resides
| Schema | +| Object Catalog: Function | This probe monitors a list of functions and their properties. The information includes:
- The name of the function
- The name of the schema in which the function resides
- The name of the database in which the function resides
| Schema | +| Object Catalog: Index | This probe monitors a list of indexes and their properties. The information includes:
- The name of the index
- The name of the table that the index is associated with
- The name of the database in which the indexed table resides
| Schema | +| Object Catalog: Schema | This probe monitors a list of schemas and their associated databases and servers. | Database | +| Object Catalog: Sequence | This probe monitors a list of sequences and their properties. | Schema | +| Object Catalog: Table | This probe monitors a list of table information. The information includes:
- The table name
- The name of the schema in which the table resides
- The name of the database in which the schema resides
- A Boolean indicator that indicates if the table has a primary key
| Schema | +| Object Catalog: Tablespace | This probe monitors a list of tablespaces. | Server | +| Operating System Information | This probe monitors the operating system details and boot time. | Agent | +| Package Catalog | This probe monitors the packages that are currently available for installation. The information gathered includes:
- The package name
- The package version
| Agent | +| PG HBA Conf | This probe monitors authentication configuration information from the `pg_hba.conf` file. | Server | +| Server Information | This probe monitors information about servers. | Server | +| Session Information | This probe monitors session information. The information includes:
- The name of the session user
- The date and time that the session connected to the server
- The status of the session at the time that the information was gathered (idle, waiting, etc)
- The client address and port number
| Server | +| Settings | This probe monitors the values currently assigned to GUC variables. | Server | +| SQL Protect | This probe monitors a server, retrieving information about SQL injection attacks. | Server | +| Slony Replication | This probe monitors lag data for clusters replicated using Slony. | Database | +| Streaming Replication | This probe monitors a cluster that is using streaming replication, retrieving information about:
- The sent Xlog location (in bytes)
- The write Xlog location (in bytes)
- The flush Xlog location (in bytes)
- The replay Xlog location (in bytes)
- The Xlog lag (in segments)
- The Xlog lag (in pages)
| Server | +| Streaming Replication Lag Time | This probe monitors a cluster that is using streaming replication, retrieving lag information about:
- Replication lag time (in seconds)
- Current status of replication (running/paused)
| Server | +| Streaming Replication Database Conflicts | This probe monitors a database that is using streaming replication, retrieving information about any conflicts that arise. This includes information about queries that have been canceled due to:
- The # of drop tablespace conflicts
- The # of lock timeout conflicts
- The # of old snapshot conflicts
- The # of pinned buffer conflicts
- The # of deadlock conflicts
| Server | +| Table Bloat | This probe monitors information about the current table bloat. The information includes:
- The name of the table
- The name of the schema in which the table resides
- The estimated number of pages
- The estimated number of wasted pages
- The estimated number of bytes per row
| Database | +| Table Frozen XID | This probe monitors the frozen XID of each table. | Schema | +| Table Size | This probe monitors information about table size. The information includes:
- Table size (in MB's)
- Total index size (in MB's)
- Total table size, with indexes and TOAST (in MB's)
| Database | +| Table Statistics | This probe monitors table statistics. The information includes:
- The number of sequential scans
- The number of sequential scan rows
- The number of index scans
- The number of index scan rows
- The number of rows inserted
- The number of rows updated
- The number of rows deleted
- The number of live rows
- The number of dead rows
- The last VACUUM
- The last auto-vacuum
- The last ANALYZE
- The last auto-analyze
- The number of pages estimated by ANALYZE
- The number of rows estimated by ANALYZE
| Database | +| Tablespace Size | This probe monitors a list of tablespaces and their sizes. | Server | +| User Information | This probe monitors a list of the current users. The stored information includes:
- The user name
- The user type (superuser vs. non-superuser)
- The server to which the user is connected
| Server | +| WAL Archive Status | This probe monitors the status of the WAL archive. The stored information includes:
- The # of WAL archives done
- The # of WAL archives pending
- The last archive time
- The # of WAL archives failed
- The time of the last failure
| Server | +| xDB Replication | This probe monitors lag data for clusters replicated using xDB replication. | Database | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/index.mdx index 75c12906e55..73151d888b2 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/index.mdx @@ -4,15 +4,14 @@ title: "Probe Configuration" - -A [probe](../../04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/#pem_probes) is a scheduled task that returns a set of performance metrics about a specific monitored server, database, operating system or agent. You can use the Manage Probes tab to override the default configuration and customize the behavior of each probe. To open the Manage Probes tab, select `Manage Probes…` from the `Management` menu. +A [probe](01_pem_probes/#pem_probes) is a scheduled task that returns a set of performance metrics about a specific monitored server, database, operating system or agent. You can use the Manage Probes tab to override the default configuration and customize the behavior of each probe. To open the Manage Probes tab, select `Manage Probes…` from the `Management` menu. ![Manage Probes tab](../../../images/pem_manage_probes_tab.png) The `Manage Probes` tab provides a set of `Quick Links` that you can use to create and manage probes: -> - Click the `` `Manage Custom Probes `` <pem\_custom\_probes> icon to open the Custom Probes\` tab and create or modify a custom probe. -> - Click the `` `Copy Probes `` <copy\_probe\_config> icon to open the Copy Probe\` dialog, and copy the probe configurations from the currently selected object to one or more monitored objects. 
+> - Click the `` `Manage Custom Probes `` <pem_custom_probes> icon to open the Custom Probes\` tab and create or modify a custom probe. +> - Click the `` `Copy Probes `` <copy_probe_config> icon to open the Copy Probe\` dialog, and copy the probe configurations from the currently selected object to one or more monitored objects. A probe monitors a unique set of metrics for each specific object type (server, database, database object, or agent); select the name of an object in the tree control to review the probes for that object. @@ -28,6 +27,6 @@ Contents:
-pem\_probes +pem_probes
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/index.mdx index 42847c56e23..f7a7de078fe 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/12_pem_manage_probes/index.mdx @@ -4,15 +4,14 @@ title: "The Manage Probes Tab" - -A [probe](../04_toc_pem_features/12_pem_manage_probes/03_pem_probe_config/#pem_probes) is a scheduled task that returns a set of performance metrics about a specific monitored server, database, operating system or agent. You can use the Manage Probes tab to override the default configuration and customize the behavior of each probe. To open the Manage Probes tab, select `Manage Probes…` from the `Management` menu. +A [probe](03_pem_probe_config/01_pem_probes/#pem_probes) is a scheduled task that returns a set of performance metrics about a specific monitored server, database, operating system or agent. You can use the Manage Probes tab to override the default configuration and customize the behavior of each probe. To open the Manage Probes tab, select `Manage Probes…` from the `Management` menu. ![Manage Probes tab](../../images/pem_manage_probes_tab.png) The `Manage Probes` tab provides a set of `Quick Links` that you can use to create and manage probes: -> - Click the `` `Manage Custom Probes `` <pem\_custom\_probes> icon to open the Custom Probes\` tab and create or modify a custom probe. -> - Click the `` `Copy Probes `` <copy\_probe\_config> icon to open the Copy Probe\` dialog, and copy the probe configurations from the currently selected object to one or more monitored objects. +> - Click the `` `Manage Custom Probes `` <pem_custom_probes> icon to open the Custom Probes\` tab and create or modify a custom probe. 
+> - Click the `` `Copy Probes `` <copy_probe_config> icon to open the Copy Probe\` dialog, and copy the probe configurations from the currently selected object to one or more monitored objects. A probe monitors a unique set of metrics for each specific object type (server, database, database object, or agent); select the name of an object in the tree control to review the probes for that object. @@ -28,6 +27,6 @@ Contents:
-pem\_custom\_probes copy\_probe\_config pem\_probe\_config +pem_custom_probes copy_probe_config pem_probe_config
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/13_pem_alert_blackout.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/13_pem_alert_blackout.mdx index f337f9f77ea..34aa7185623 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/13_pem_alert_blackout.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/13_pem_alert_blackout.mdx @@ -4,7 +4,6 @@ title: "Schedule Alert Blackout" - You can use the `Schedule Alert Blackout` option on the `Management` menu to schedule an alert blackout for your Postgres servers and PEM Agents during maintenance. Alerts will not be raised during a defined blackout period. To schedule an alert blackout, click on the `Management` menu and select `Schedule Alert Blackout`. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/14_pem_scheduled_system_jobs.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/14_pem_scheduled_system_jobs.mdx index b05f16db511..564c0b51e74 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/14_pem_scheduled_system_jobs.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/14_pem_scheduled_system_jobs.mdx @@ -4,7 +4,6 @@ title: "Scheduled System Jobs" - PEM defines system jobs to take care of cleanup activities at scheduled intervals. All of the system jobs are enabled by default, and are scheduled to deploy on a regular interval. You can query the `pem.job` table in the `pem` database to review a list of the system jobs. The current schedule for system jobs is stored in the `pem.schedule` table in the `pem` database. 
The system job names, their descriptions, and default deployment intervals are listed in the table below: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/15_pem_scheduled_task_tab.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/15_pem_scheduled_task_tab.mdx index 59ace19d862..c189d5ad5b3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/15_pem_scheduled_task_tab.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/15_pem_scheduled_task_tab.mdx @@ -4,7 +4,6 @@ title: "Scheduled Task Tab" - You can schedule the execution of user-defined tasks on registered servers for a time that is most convenient, and least intrusive to your users. Tasks may be one-off, or recurring and are comprised of one or more steps, which may be a SQL script, a batch/shell script, or an internal function in the PEM agent. You can view pending tasks on the *Scheduled Tasks tab.* To open the `Scheduled Tasks` tab, select either a PEM Agent or a managed server in the tree control of the PEM client and select `Scheduled Tasks...` from the `Management` menu. @@ -60,6 +59,7 @@ Use the arrow to the left of an execution date (in the `Steps` column) to view d To delete a user-defined task, highlight the name of the task, and click the `Delete` icon located to the left of a task's name. The task will be marked for deletion, and removed when you click the `Save` icon (located in the upper-right corner of the `Manage Tasks` table. -**Note:** Tasks with no `Next run` date will automatically be removed from the PEM server when the last run date is more than probe\_log\_retention\_time <pem\_config\_options> days ago. +!!! Note + Tasks with no `Next run` date will automatically be removed from the PEM server when the last run date is more than probe_log_retention_time <pem_config_options> days ago. 
Please note that if any of the scheduled tasks for backup, restore, validate host, validate server or delete obsolete backup for any of the BART Server gets deleted, it will not display under the `BART Tool Activities` graph of BART Server's dashboard. However, it gets listed under the `Initiated Server Backups` list. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/16_pem_scheduled_jobs.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/16_pem_scheduled_jobs.mdx index 089ad24d9c6..f0e7e5bf951 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/16_pem_scheduled_jobs.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/16_pem_scheduled_jobs.mdx @@ -4,7 +4,6 @@ title: "Creating a PEM Scheduled Job" - You can create a PEM scheduled job to perform a set of custom-defined steps in the specified sequence. These steps may contain SQL code or a batch/shell script that you may run on a server that is bound with the agent. You can schedule these jobs to suit your business requirements. For example, you can create a job for taking a backup of a particular database server and schedule it to run on a specific date and time of every month. To create or manage a PEM scheduled job, use the PEM tree control to browse to the PEM agent for which you want to create the job. The tree control will display a Jobs node, under which currently defined jobs are displayed. To add a new job, right click on the Jobs node, and select Create Job... from the context menu. @@ -50,11 +49,11 @@ Use fields on the step definition dialog to define the step: Use the context-sensitive field on the step definition dialog's `Code` tab to provide the SQL code or batch script that will be executed during the step: > - If the step invokes SQL code, provide one or more SQL statements in the `SQL query` field. -> - If the step invokes a batch script, provide the script in the `Code` field. 
If you are running on a Windows server, standard batch file syntax must be used. When running on a Linux server, any shell script may be used, provided that a suitable interpreter is specified on the first line (e.g. *\#!/bin/sh*). Along with the defined inline code, you can also provide the path of any batch script, shell script, or SQL file on the filesystem. +> - If the step invokes a batch script, provide the script in the `Code` field. If you are running on a Windows server, standard batch file syntax must be used. When running on a Linux server, any shell script may be used, provided that a suitable interpreter is specified on the first line (e.g. *#!/bin/sh*). Along with the defined inline code, you can also provide the path of any batch script, shell script, or SQL file on the filesystem. To invoke a script on a Linux system, you must modify the entry for `batch_script_user` parameter of agent.cfg file and specify the user that should be used to run the script. You can either specify a non-root user or root for this parameter. If you do not specify a user, or the specified user does not exist, then the script will not be executed. Restart the agent after modifying the file. If pemagent is being run by a non-root user then the value of `batch_script_user` will be ignored and the script will be executed by the same non-root user that is being used for running the pemagent. -To invoke a script on a Windows system, set the registry entry for `AllowBatchJobSteps` as true and restart the PEM agent. PEM registry entries are located in HKEY\_LOCAL\_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. +To invoke a script on a Windows system, set the registry entry for `AllowBatchJobSteps` as true and restart the PEM agent. PEM registry entries are located in HKEY_LOCAL_MACHINE\\Software\\Wow6432Node\\EnterpriseDB\\PEM\\agent. After providing all the information required by the step, click the `Save` button to save and close the step definition dialog. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/17_pem_job_notification.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/17_pem_job_notification.mdx index 27b13daba73..3cfd83a20ef 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/17_pem_job_notification.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/17_pem_job_notification.mdx @@ -4,7 +4,6 @@ title: "Sending email notifications for a job" - You can configure the settings in PEM console for sending the SMTP trap on success or failure of a system-generated job (listed under scheduled tasks) or a custom-defined agent job. For information on custom-defined agent job, see ‘Creating PEM Scheduled Jobs’. These email notification settings can be configured at following three levels (in order of precedence) to send email notifications to the specified user group: > - Job level diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/01_pem_log_view.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/01_pem_log_view.mdx index 1910ab28a4d..b3c44255049 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/01_pem_log_view.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/01_pem_log_view.mdx @@ -4,8 +4,7 @@ title: "Log Viewer" - -When PEM executes [scheduled tasks](../../04_toc_pem_features/18_pem_task_view/../../04_toc_pem_features/18_pem_task_view/#pem_task_view), log records are created to record the status of each step of the task for diagnostic purposes. Log records can be viewed on the *Log Viewer* dialogue, opened from the [Scheduled Task dialogue](../../04_toc_pem_features/18_pem_task_view/../../04_toc_pem_features/18_pem_task_view/#pem_task_view). 
+When PEM executes [scheduled tasks](./#pem_task_view), log records are created to record the status of each step of the task for diagnostic purposes. Log records can be viewed on the *Log Viewer* dialogue, opened from the [Scheduled Task dialogue](./#pem_task_view). ![Scheduled Task Log viewer](../../images/pem_log_view.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/index.mdx index e1b1d5c6f88..f038ee4bc6c 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/18_pem_task_view/index.mdx @@ -4,7 +4,6 @@ title: "Task Viewer" - Postgres Enterprise Manager runs tasks on managed servers by scheduling them on the PEM server for the agent on the managed server to execute at the appropriate time. Tasks may be one-off, or recurring and are comprised of one or more steps, which may be a SQL script, a batch/shell script, or an internal function in the PEM agent. Tasks may be viewed using the *Scheduled Tasks dialogue.* To open the `Scheduled Tasks dialogue`, select either a PEM Agent or a managed server in the tree control of the PEM client and select the `Scheduled Tasks` menu option from the `Server` sub-menu of the `Management` menu, or from the context menu. @@ -22,16 +21,17 @@ The dialogue displays the task data relating to the selected object when it was - **Next Run** - The time the task is next scheduled to execute, if any. - **Created** - The time and date that the task was created. -In order to [view the log records](../04_toc_pem_features/18_pem_task_view/#pem_log_view) for a task, select it in the list and click the `Log Viewer` button. +In order to [view the log records](01_pem_log_view/#pem_log_view) for a task, select it in the list and click the `Log Viewer` button. 
In order to remove tasks and their associated log records (if present), click the checkbox to select each task to be removed, and then click the `Remove` button. -**Note:** Tasks with no next run date will automatically be removed from the PEM server when the last run date is more than probe\_log\_retention\_time <pem\_config\_options> days ago. +!!! Note + Tasks with no next run date will automatically be removed from the PEM server when the last run date is more than probe_log_retention_time <pem_config_options> days ago. Contents:
-pem\_log\_view +pem_log_view
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/19_monitoring_a_failover_manager_cluster.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/19_monitoring_a_failover_manager_cluster.mdx index 1d7181c217d..8818b46a20e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/19_monitoring_a_failover_manager_cluster.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/19_monitoring_a_failover_manager_cluster.mdx @@ -4,17 +4,16 @@ title: "Monitoring a Failover Manager Cluster" - You can configure PEM to display status information about one or more Failover Manager clusters on the Streaming Replication dashboard. Before configuring PEM to monitor a Failover Manager cluster, you must install and configure Streaming Replication and Failover Manager on the cluster. Please note that your Streaming Replication `standby.signal` file must include the following parameters: -- primary\_conninfo -- promote\_trigger\_file +- primary_conninfo +- promote_trigger_file -For information about installing and configuring Failover Manager and Streaming Replication, please see the EnterpriseDB Failover Manager Guide, available at www.enterprisedb.com. +For information about installing and configuring Failover Manager and Streaming Replication, please see the EnterpriseDB Failover Manager Guide, available at [www.enterprisedb.com](http://www.enterprisedb.com). -To configure PEM to monitor a Failover Manager cluster, use the PEM client to create a server definition for the primary node of the Failover Manager cluster. 
Use the tabs on the [New Server Registration](../01_toc_pem_getting_started/#pem_define_connection) dialog to specify general connection properties for the primary node; use fields on the `Advanced` tab to specify information about the Failover Manager cluster: +To configure PEM to monitor a Failover Manager cluster, use the PEM client to create a server definition for the primary node of the Failover Manager cluster. Use the tabs on the [New Server Registration](../01_toc_pem_getting_started/07_pem_define_connection/#pem_define_connection) dialog to specify general connection properties for the primary node; use fields on the `Advanced` tab to specify information about the Failover Manager cluster: - Use the `EFM Cluster Name` field to specify the name of the Failover Manager cluster. The cluster name is the prefix of the name of the cluster properties file. For example, if your cluster properties file is named `efm.properties`, your cluster name is `efm`. - Use the `EFM Installation Path` field to specify the location of the Failover Manager binary file. By default, the Failover Manager binary file is installed in `/usr/efm-x.x/bin`. @@ -28,7 +27,7 @@ To include Failover Manager information on the Streaming Analysis dashboard, you To enable a probe, right click on the node name, and select `Manage Probes` from the `Management` menu. -To view the `Streaming Replication Analysis` dashboard and the status of the Failover Manager cluster, right click on the name of the primary node in the `Object browser` tree control and navigate through the `Dashboards` menu to select [Streaming Replication Analysis](../04_toc_pem_features/01_dashboards/#str_replication_dashboard). 
+To view the `Streaming Replication Analysis` dashboard and the status of the Failover Manager cluster, right click on the name of the primary node in the `Object browser` tree control and navigate through the `Dashboards` menu to select [Streaming Replication Analysis](01_dashboards/16_str_replication_dashboard/#str_replication_dashboard). ## Promoting a Cluster diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/20_performance_diagnostic.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/20_performance_diagnostic.mdx index 86261cc767a..0b48a3b34c3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/20_performance_diagnostic.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/20_performance_diagnostic.mdx @@ -4,7 +4,6 @@ title: "Performance Diagnostic" - You can use the Performance Diagnostic dashboard to analyze the database performance for Postgres instances by monitoring the wait events. To display the diagnostic graphs, PEM uses the data collected by EDB Wait States module. Peformance Diagnostic feature is supported for Advanced Server databases from PEM 7.6 version onwards and for PostgreSQL databases it is supported from PEM 8.0 onwards. @@ -21,7 +20,7 @@ For PostgreSQL databases, Performance Diagnostics is supported only for versions
-For more information on EDB Wait States, see [EDB Postgres Advanced Server Guide](https://www.enterprisedb.com/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/latest/performance_analysis_and_tuning.html#edb-wait-states). +For more information on EDB Wait States, see [EDB Postgres Advanced Server Guide](/epas/latest/epas_guide/13_performance_analysis_and_tuning/). You can analyze the Wait States data on multiple levels by narrowing down your selection of data. Each level of the graph is populated on the basis of your selection of data at the higher level. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/21_reports.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/21_reports.mdx index 31962bc019c..0f5bcddb164 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/21_reports.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/21_reports.mdx @@ -4,7 +4,6 @@ title: "Reports" - You can generate the System Configuration report and Core Usage report for all locally and remotely managed servers. To generate this report, select `Reports` from the `Management` Menu. Reports has following options: @@ -14,7 +13,7 @@ Reports has following options: > - Core Usage Report (JSON) > - Core Usage Report (HTML) -Please note that only superusers or the users with the pem\_admin role permission can download the System Configuration or Core Usage reports. +Please note that only superusers or the users with the pem_admin role permission can download the System Configuration or Core Usage reports. Also note that information in these reports will reflect the latest probe run time. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/index.mdx index 7dabd2be2b1..c1c62297279 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/04_toc_pem_features/index.mdx @@ -4,14 +4,13 @@ title: "Enterprise Management Features" - Postgres Enterprise Manager offers a number of additional enterprise management features that will assist you in managing, analyzing, streamlining, and deploying Postgres functionality. PEM probes monitor managed servers, retrieving information that PEM then analyzes to create dashboards that display useful information and statistics about your hosts, servers and databases. PEM dialogs provide easy access to probe, server, and agent configurations so you can enable and customise the behaviour of PEM features. Contents:
-dashboards pem\_server\_config +dashboards pem_server_config
@@ -21,7 +20,7 @@ Contents:
-log\_manager audit\_manager pem\_log\_analysis\_expert tuning\_wizard pem\_postgres\_expert capacity\_manager +log_manager audit_manager pem_log_analysis_expert tuning_wizard pem_postgres_expert capacity_manager
@@ -31,7 +30,7 @@ Contents:
-pem\_alerting pem\_manage\_charts pem\_manage\_dashboards pem\_manage\_probes +pem_alerting pem_manage_charts pem_manage_dashboards pem_manage_probes
@@ -41,7 +40,7 @@ Contents:
-pem\_alert\_blackout pem\_scheduled\_system\_jobs pem\_scheduled\_task\_tab pem\_scheduled\_jobs pem\_job\_notification pem\_task\_view monitoring\_a\_failover\_manager\_cluster performance\_diagnostic +pem_alert_blackout pem_scheduled_system_jobs pem_scheduled_task_tab pem_scheduled_jobs pem_job_notification pem_task_view monitoring_a_failover_manager_cluster performance_diagnostic
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/01_grant_wizard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/01_grant_wizard.mdx index cef8d921668..98aeeaffddd 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/01_grant_wizard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/01_grant_wizard.mdx @@ -4,7 +4,6 @@ title: "Grant Wizard" - The `Grant Wizard` tool is a graphical interface that allows you to manage the privileges of one or more database objects in a point-and-click environment. A search box, dropdown lists, and checkboxes facilitate quick selections of database objects, roles and privileges. The wizard organizes privilege management through a sequence of windows: `Object Selection (step 1 of 3)`, `Privileges Selection (step 2 of 3)` and `Final (Review Selection) (step 3 of 3)`. The `Final (Review Selection)` window displays the SQL code generated by wizard selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/02_add_restore_point_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/02_add_restore_point_dialog.mdx index a085523150a..225de0e909f 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/02_add_restore_point_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/02_add_restore_point_dialog.mdx @@ -4,7 +4,6 @@ title: "Add named restore point Dialog" - Use the `Add named restore point` dialog to take a named snapshot of the state of the server for use in a recovery file. To create a named restore point, the server's postgresql.conf file must specify a `wal_level` value of `replica`, or `logical`. You must be a database superuser to create a restore point. 
![Restore point dialog](../images/add_restore_point.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/03_import_export_data.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/03_import_export_data.mdx index 8595c646c6f..aa50b28d839 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/03_import_export_data.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/03_import_export_data.mdx @@ -4,7 +4,6 @@ title: "Import/Export data Dialog" - Use the `Import/Export data` dialog to copy data from a table to a file, or copy data from a file into a table. The `Import/Export data` dialog organizes the import/export of data through the `Options` and `Columns` tabs. @@ -62,6 +61,6 @@ Note
-You can click on the ![sm\_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](../05_toc_pem_management_basics/#storage_manager) to download the backup file on the client machine . +You can click on the ![sm_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](05_storage_manager/#storage_manager) to download the backup file on the client machine . diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/01_maintenance_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/01_maintenance_dialog.mdx index c78bc2f6ffb..ec0b7419174 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/01_maintenance_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/01_maintenance_dialog.mdx @@ -4,7 +4,6 @@ title: "Maintenance Dialog" - Use the `Maintenance` dialog to VACUUM, ANALYZE, REINDEX or CLUSTER a database or selected database objects. ![Maintenance dialog](../../images/maintenance.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/index.mdx index 0edb7e192ea..ade6072fbed 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/04_maintenance/index.mdx @@ -4,7 +4,6 @@ title: "Maintain a database object" - ![Maintenance dialog](../../images/maintenance.png) This tool allows to maintain the database in total, or only a selected table, or a selected index. 
@@ -33,6 +32,6 @@ The RECREATE option doesn't call the REINDEX SQL command internally, instead it
-maintenance\_dialog +maintenance_dialog
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/05_storage_manager.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/05_storage_manager.mdx index d9584df4050..2e0d1522314 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/05_storage_manager.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/05_storage_manager.mdx @@ -4,7 +4,6 @@ title: "Storage Manager" - *Storage Manager* is a feature that helps you manage your systems storage device. You can use *Storage Manager* to: - Download, upload, or manage operating system files. @@ -41,7 +40,7 @@ Click on the check box next to *Show hidden files and folders* at the bottom of Use the *Format* drop down list to select the format of the files to be displayed; choose from *sql*, *csv*, or *All Files*. -You can also download backup files through *Storage Manager* at the successful completion of the backups taken through [Backup Dialog](../05_toc_pem_management_basics/#backup_dialog), [Backup Global Dialog](../05_toc_pem_management_basics/#backup_globals_dialog), or [Backup Server Dialog](../05_toc_pem_management_basics/#backup_server_dialog). +You can also download backup files through *Storage Manager* at the successful completion of the backups taken through [Backup Dialog](06_backup_dialog/#backup_dialog), [Backup Global Dialog](07_backup_globals_dialog/#backup_globals_dialog), or [Backup Server Dialog](08_backup_server_dialog/#backup_server_dialog). At the successful completion of a backup, click on the icon to open the current backup file in *Storage Manager* on the *process watcher* window. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/06_backup_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/06_backup_dialog.mdx index 58e8d649aa6..8fb7f06020c 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/06_backup_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/06_backup_dialog.mdx @@ -4,8 +4,7 @@ title: "Backup Dialog" - -`PEM` uses the *pg\_dump* utility to provide an easy way to create a backup in a plain-text or archived format. You can then use a client application (like `psql` or the `Query Tool`) to restore a plain-text backup file, or use the Postgres `pg_restore` utility to restore an archived backup. The `pg_dump` utility must have read access to all database objects that you want to back up. +`PEM` uses the *pg_dump* utility to provide an easy way to create a backup in a plain-text or archived format. You can then use a client application (like `psql` or the `Query Tool`) to restore a plain-text backup file, or use the Postgres `pg_restore` utility to restore an archived backup. The `pg_dump` utility must have read access to all database objects that you want to back up. You can backup a single table, a schema, or a complete database. Select the name of the backup source in the `Browser` tree control, right click to open the context menu, and select `Backup...` to open the `Backup` dialog. The name of the object selected will appear in the dialog title bar. @@ -84,7 +83,7 @@ Click the `Dump options` tab to continue. Use the box fields in the `Dump option > - Move the switch next to `Force double quotes on identifiers` to the `Yes` position to force the quoting of all identifiers. > - Move the switch next to `Use SET SESSION AUTHORIZATION` to the `Yes` position to include a statement that will use a SET SESSION AUTHORIZATION command to determine object ownership (instead of an ALTER OWNER command). 
-When you’ve specified the details that will be incorporated into the pg\_dump command: +When you’ve specified the details that will be incorporated into the pg_dump command: - Click the `Backup` button to build and execute a command that builds a backup based on your selections on the `Backup` dialog. - Click the `Cancel` button to exit without saving work. @@ -107,6 +106,6 @@ Note -You can click on the ![sm\_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](../05_toc_pem_management_basics/#storage_manager) to download the backup file on the client machine . +You can click on the ![sm_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](05_storage_manager/#storage_manager) to download the backup file on the client machine . diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/07_backup_globals_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/07_backup_globals_dialog.mdx index 1021a858415..468f465f213 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/07_backup_globals_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/07_backup_globals_dialog.mdx @@ -4,7 +4,6 @@ title: "Backup Globals Dialog" - Use the `Backup Globals` dialog to create a plain-text script that recreates all of the database objects within a cluster, and the global objects that are shared by those databases. Global objects include tablespaces, roles, and object properties. You can use the PEM `Query Tool` to play back a plain-text script, and recreate the objects in the backup. 
![Backup Globals dialog - General tab](../images/backup_globals_general.png) @@ -39,6 +38,6 @@ Note -You can click on the ![sm\_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](../05_toc_pem_management_basics/#storage_manager) to download the backup file on the client machine . +You can click on the ![sm_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](05_storage_manager/#storage_manager) to download the backup file on the client machine . diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/08_backup_server_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/08_backup_server_dialog.mdx index afab3e8d32e..2205671eca3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/08_backup_server_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/08_backup_server_dialog.mdx @@ -4,7 +4,6 @@ title: "Backup Server Dialog" - Use the `Backup Server` dialog to create a plain-text script that will recreate the selected server. You can use the PEM `Query Tool` to play back a plain-text script, and recreate the server. ![Backup Server dialog - General tab](../images/backup_server_general.png) @@ -76,6 +75,6 @@ Note -You can click on the ![sm\_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](../05_toc_pem_management_basics/#storage_manager) to download the backup file on the client machine . +You can click on the ![sm_icon](../images/sm_icon.png) icon in the process watcher window to open the file location in the Storage Manager. You can use the [Storage Manager](05_storage_manager/#storage_manager) to download the backup file on the client machine . 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/09_restore_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/09_restore_dialog.mdx index 4cec4761af7..c6702e567ef 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/09_restore_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/09_restore_dialog.mdx @@ -4,10 +4,9 @@ title: "Restore Dialog" +The `Restore` dialog provides an easy way to use a Custom, tar, or Directory format backup taken with the PEM `Backup` dialog to recreate a database or database object. The `Backup` dialog invokes options of the pg_dump client utility; the `Restore` dialog invokes options of the pg_restore client utility. -The `Restore` dialog provides an easy way to use a Custom, tar, or Directory format backup taken with the PEM `Backup` dialog to recreate a database or database object. The `Backup` dialog invokes options of the pg\_dump client utility; the `Restore` dialog invokes options of the pg\_restore client utility. - -You can use the `Query Tool` to play back the script created during a plain-text backup made with the `Backup` dialog. For more information about backing up or restoring, please refer to the documentation for [pg\_dump](https://www.postgresql.org/docs/current/static/app-pgdump.html) or [pg\_restore](https://www.postgresql.org/docs/current/static/app-pgrestore.html). +You can use the `Query Tool` to play back the script created during a plain-text backup made with the `Backup` dialog. For more information about backing up or restoring, please refer to the documentation for [pg_dump](https://www.postgresql.org/docs/current/static/app-pgdump.html) or [pg_restore](https://www.postgresql.org/docs/current/static/app-pgrestore.html). 
![Restore dialog - General tab](../images/restore_general.png) @@ -20,7 +19,7 @@ Use the fields on the `General` tab to specify general information about the res - Enter the complete path to the backup file in the `Filename` field. Optionally, select the `Browser` icon (ellipsis) to the right to navigate into a directory and select the file that contains the archive. -- Use the `Number of Jobs` field to specify if pg\_restore should use multiple (concurrent) jobs to process the restore. Each job uses a separate connection to the server. +- Use the `Number of Jobs` field to specify if pg_restore should use multiple (concurrent) jobs to process the restore. Each job uses a separate connection to the server. - Use the drop-down listbox next to `Rolename` to specify the role that will be used to authenticate with the server during the restore process. @@ -73,7 +72,7 @@ Click the `Restore options` tab to continue. Use the fields on the `Restore opti > - Move the switch next to `Use SET SESSION AUTHORIZATION` to the `Yes` position to include a statement that will use a SET SESSION AUTHORIZATION command to determine object ownership (instead of an ALTER OWNER command). > - Move the switch next to `Exit on error` to the `Yes` position to instruct `pg_restore` to exit restore if there is an error in sending SQL commands. The default is to continue and to display a count of errors at the end of the restore. -When you’ve specified the details that will be incorporated into the pg\_restore command, click the `Restore` button to start the process, or click the `Cancel` button to exit without saving your work. A popup will confirm if the restore is successful. +When you’ve specified the details that will be incorporated into the pg_restore command, click the `Restore` button to start the process, or click the `Cancel` button to exit without saving your work. A popup will confirm if the restore is successful. 
![Restore dialog - Successful Notifications popup](../images/restore_messages.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/01_database_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/01_database_dialog.mdx index ef7eafe6ab0..b50103bd09b 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/01_database_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/01_database_dialog.mdx @@ -4,7 +4,6 @@ title: "Database Dialog" - Use the `Database` dialog to define or modify a database. To create a database, you must be a database superuser or have the CREATE privilege. The `Database` dialog organizes the development of a database through the following dialog tabs: `General`, `Definition`, `Security`, and `Parameters`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -69,7 +68,7 @@ Click the `Advanced` tab to continue. Use the `Advanced` tab to set advanced parameters for the database. -- Use `Schema restriction` field to provide a SQL restriction that will be used against the pg\_namespace table to limit the schemas that you see. For example, you might enter: `public` so that only `public` are shown in the pgAdmin browser.Separate entries with a comma or tab as you type. +- Use `Schema restriction` field to provide a SQL restriction that will be used against the pg_namespace table to limit the schemas that you see. For example, you might enter: `public` so that only `public` are shown in the pgAdmin browser.Separate entries with a comma or tab as you type. Click the `SQL` tab to continue. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/02_move_objects.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/02_move_objects.mdx index 38aa81caa32..4e3b134b415 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/02_move_objects.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/02_move_objects.mdx @@ -4,7 +4,6 @@ title: "Move Objects Dialog" - Use the `Move Objects` dialog to move database objects from one tablespace to another tablespace. The `Move Objects` dialog organizes the movement of database objects with the `General` tab; the `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/03_resource_group_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/03_resource_group_dialog.mdx index 22510112237..3d22c5a3812 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/03_resource_group_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/03_resource_group_dialog.mdx @@ -4,7 +4,6 @@ title: "Resource Group Dialog" - Use the `Resource Group` dialog to create a resource group and set values for its resources. A resource group is a named, global group on which various resource usage limits can be defined. The resource group is accessible from all databases in the cluster. To use the `Resource Group` dialog, you must have superuser privileges.
Please note that resource groups are supported when connected to EDB Postgres Advanced Server; for more information about using resource groups, please see the EDB Postgres Advanced Server Guide, available at: > diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/04_role_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/04_role_dialog.mdx index cda91d01e70..03c08c96910 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/04_role_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/04_role_dialog.mdx @@ -4,7 +4,6 @@ title: "Login/Group Role Dialog" - Use the `Login/Group Role` dialog to define a role. A role may be an individual user (with or without login privileges) or a group of users. Note that roles defined at the cluster level are shared by all databases in the cluster. The `Login/Group Role` dialog organizes the creation and management of roles through the following dialog tabs: `General`, *Definition*, `Privileges`, *Parameters*, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -48,7 +47,7 @@ Click the `Parameters` tab to continue. ![create Role dialog - Parameters tab](../../images/role_parameters.png) -Use the fields on the `Parameters` tab to set session defaults for a selected configuration parameter when the role is connected to a specified database. This tab invokes the ALTER ROLE... SET configuration\_parameter syntax. Click the `Add` icon (+) to assign a value for a parameter. +Use the fields on the `Parameters` tab to set session defaults for a selected configuration parameter when the role is connected to a specified database. This tab invokes the ALTER ROLE... SET configuration_parameter syntax. Click the `Add` icon (+) to assign a value for a parameter. 
- Use the drop-down listbox in the `Name` field to select a parameter. - Use the `Value` field to specify a value for the parameter. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/05_tablespace_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/05_tablespace_dialog.mdx index 0044c09e285..feb9798d622 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/05_tablespace_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/05_tablespace_dialog.mdx @@ -4,7 +4,6 @@ title: "Tablespace Dialog" - Use The `Tablespace` dialog to define a tablespace. A tablespace allows superusers to define an alternative location on the file system where the data files containing database objects (such as tables and indexes) reside. Tablespaces are only supported on systems that support symbolic links. Note that a tablespace cannot be used independently of the cluster in which it is defined. The `Tablespace` dialog organizes the definition of a tablespace through the following tabs: `General`, *Definition*, `Parameters`, and `Security`. The *SQL* tab displays the SQL code generated by dialog selections. @@ -63,7 +62,7 @@ The following is an example of the sql command generated by user selections in t ![Create Tablespace dialog - SQL tab](../../images/tablespace_sql.png) -The example shown demonstrates creating a tablespace named `space_01`. It has a *random\_page\_cost* value equal to `1`. +The example shown demonstrates creating a tablespace named `space_01`. It has a *random_page_cost* value equal to `1`. - Click the `Info` button (i) to access online help. - Click the `Save` button to save work. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/index.mdx index 9dd8ffd867c..388e26d7a17 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/10_managing_cluster_objects/index.mdx @@ -4,13 +4,12 @@ title: "Managing Cluster Level Objects" - Some object definitions reside at the cluster level; PEM provides dialogs that allow you to create these objects, manage them, and control their relationships to each other. To access a dialog that allows you to create a database object, right-click on the object type in the Browser tree control, and select the `Create` option for that object. For example, to create a new database, right-click on the `Databases` node, and select `Create Database...` Contents:
-database\_dialog move\_objects resource\_group\_dialog role\_dialog tablespace\_dialog +database_dialog move_objects resource_group_dialog role_dialog tablespace_dialog
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/01_cast_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/01_cast_dialog.mdx index 3be8f1021ae..1c1ae7d5080 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/01_cast_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/01_cast_dialog.mdx @@ -4,7 +4,6 @@ title: "Cast Dialog" - Use the `Cast` dialog to define a cast. A cast specifies how to convert a value from one data type to another. The `Cast` dialog organizes the development of a cast through the following dialog tabs: `General` and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/02_collation_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/02_collation_dialog.mdx index f3003180b5c..3bbd6a61344 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/02_collation_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/02_collation_dialog.mdx @@ -4,7 +4,6 @@ title: "Collation Dialog" - Use the `Collation` dialog to define a collation. A collation is an SQL schema object that maps a SQL name to operating system locales. To create a collation, you must have a CREATE privilege on the destination schema. The `Collation` dialog organizes the development of a collation through the following dialog tabs: `General` and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/03_domain_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/03_domain_dialog.mdx index 3916cec807f..792f06de978 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/03_domain_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/03_domain_dialog.mdx @@ -4,7 +4,6 @@ title: "Domain Dialog" - Use the `Domain` dialog to define a domain. A domain is a data type definition that may constrain permissible values. Domains are useful when you are creating multiple tables that contain comparable columns; you can create a domain that defines constraints that are common to the columns and re-use the domain definition when creating the columns, rather than individually defining each set of constraints. The `Domain` dialog organizes the development of a domain through the following tabs: `General`, `Definition`, `Constraints`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/04_domain_constraint_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/04_domain_constraint_dialog.mdx index 0d08d185bf5..bda96929e86 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/04_domain_constraint_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/04_domain_constraint_dialog.mdx @@ -4,7 +4,6 @@ title: "Domain Constraints Dialog" - Use the `Domain Constraints` dialog to create or modify a domain constraint. 
A domain constraint confirms that the values provided for a domain meet defined criteria. The `Domain Constraints` dialog implements options of the ALTER DOMAIN command. The `Domain Constraints` dialog organizes the development of a domain constraint through the following dialog tabs: `General` and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/05_event_trigger_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/05_event_trigger_dialog.mdx index fdc8bd470bd..3dfec3f72df 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/05_event_trigger_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/05_event_trigger_dialog.mdx @@ -4,7 +4,6 @@ title: "Event Trigger Dialog" - Use the `Event Trigger` dialog to define an event trigger. Unlike regular triggers, which are attached to a single table and capture only DML events, event triggers are global to a particular database and are capable of capturing DDL events. Like regular triggers, event triggers can be written in any procedural language that includes event trigger support, or in C, but not in SQL. The `Event Trigger` dialog organizes the development of an event trigger through the following dialog tabs: `General`, `Definition`, and `Security Labels`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -24,7 +23,7 @@ Click the `Definition` tab to continue.
Use the fields in the `Definition` tab to define the event trigger: - Select a value from the drop down of `Trigger Enabled` field to specify a status. -- Use the drop-down listbox next to `Trigger function` to specify an existing function. A trigger function takes an empty argument list, and returns a value of type event\_trigger. +- Use the drop-down listbox next to `Trigger function` to specify an existing function. A trigger function takes an empty argument list, and returns a value of type event_trigger. - Select a value from the drop down of `Events` field to specify when the event trigger will fire: `DDL COMMAND START`, `DDL COMMAND END`, or `SQL DROP`. - Use the `When TAG in` field to enter filter values for TAG for which the trigger will be executed. The values must be in single quotes separated by comma. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/06_extension_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/06_extension_dialog.mdx index eb234396f9e..d667bc5890a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/06_extension_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/06_extension_dialog.mdx @@ -4,7 +4,6 @@ title: "Extension Dialog" - Use the `Extension` dialog to install a new extension into the current database. An extension is a collection of SQL objects that add targeted functionality to your Postgres installation. The `Extension` dialog adds the functionality of an extension to the current database only; you must register the extension in each database that uses the extension. Before you load an extension into a database, you should confirm that any pre-requisite files are installed.
The `Extension` dialog allows you to implement options of the CREATE EXTENSION command through the following dialog tabs: `General` and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/07_foreign_data_wrapper_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/07_foreign_data_wrapper_dialog.mdx index c0ba7830b4e..284b5f24357 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/07_foreign_data_wrapper_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/07_foreign_data_wrapper_dialog.mdx @@ -4,7 +4,6 @@ title: "Foreign Data Wrapper Dialog" - Use the `Foreign Data Wrapper` dialog to create or modify a foreign data wrapper. A foreign data wrapper is an adapter between a Postgres database and data stored on another data source. You must be a superuser to create a foreign data wrapper. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/08_foreign_server_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/08_foreign_server_dialog.mdx index dd5b90693ad..c2d396137ac 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/08_foreign_server_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/08_foreign_server_dialog.mdx @@ -4,7 +4,6 @@ title: "Foreign Server Dialog" - Use the `Foreign Server` dialog to create a foreign server. A foreign server typically encapsulates connection information that a foreign-data wrapper uses to access an external data resource. 
Each foreign data wrapper may connect to a different foreign server; in the `Browser` tree control, expand the node of the applicable foreign data wrapper to launch the `Foreign Server` dialog. The `Foreign Server` dialog organizes the development of a foreign server through the following dialog tabs: `General`, `Definition`, `Options`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/09_foreign_table_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/09_foreign_table_dialog.mdx index df1844404a5..6edb02e383a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/09_foreign_table_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/09_foreign_table_dialog.mdx @@ -4,7 +4,6 @@ title: "Foreign Table Dialog" - Use the `Foreign Table` dialog to define a foreign table in the current database. Foreign tables define the structure of an external data source that resides on a foreign server. The `Foreign Table` dialog organizes the development of a foreign table through the following dialog tabs: `General`, `Definition`, `Columns`, `Constraints`, `Options`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/10_fts_configuration_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/10_fts_configuration_dialog.mdx index e7a2bc5de1d..53c3d6c096a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/10_fts_configuration_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/10_fts_configuration_dialog.mdx @@ -4,7 +4,6 @@ title: "FTS Configuration dialog" - Use the `FTS Configuration` dialog to configure a full text search. A text search configuration specifies a text search parser that can divide a string into tokens, along with dictionaries that can identify searchable tokens. The `FTS Configuration` dialog organizes the development of an FTS configuration through the following dialog tabs: `General`, `Definition`, and `Tokens`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/11_fts_dictionary_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/11_fts_dictionary_dialog.mdx index d4fd157fb6b..2e756e7a9e1 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/11_fts_dictionary_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/11_fts_dictionary_dialog.mdx @@ -4,7 +4,6 @@ title: "FTS Dictionary Dialog" - Use the `FTS Dictionary` dialog to create a full text search dictionary. You can use a predefined template or create a new dictionary with custom parameters.
The `FTS Dictionary` dialog organizes the development of a FTS dictionary through the following dialog tabs: `General`, `Definition`, and `Options`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/12_fts_parser_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/12_fts_parser_dialog.mdx index cb2520aed65..63e96667e86 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/12_fts_parser_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/12_fts_parser_dialog.mdx @@ -4,7 +4,6 @@ title: "FTS Parser Dialog" - Use the `FTS Parser` dialog to create a new text search parser. A text search parser defines a method for splitting a text string into tokens and assigning types (categories) to the tokens. The `FTS Parser` dialog organizes the development of a text search parser through the following dialog tabs: `General`, and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/13_fts_template_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/13_fts_template_dialog.mdx index aa97ce448f2..84750a22cb1 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/13_fts_template_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/13_fts_template_dialog.mdx @@ -4,7 +4,6 @@ title: "FTS Template Dialog" - Use the `FTS Template` dialog to create a new text search template. 
A text search template defines the functions that implement text search dictionaries. The `FTS Template` dialog organizes the development of a text search Template through the following dialog tabs: `General`, and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/14_function_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/14_function_dialog.mdx index 01e23abcd88..2de9e77ab3b 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/14_function_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/14_function_dialog.mdx @@ -4,7 +4,6 @@ title: "Function Dialog" - Use the `Function` dialog to define a function. If you drop and then recreate a function, the new function is not the same entity as the old; you must drop existing rules, views, triggers, etc. that refer to the old function. The `Function` dialog organizes the development of a function through the following dialog tabs: `General`, `Definition`, `Code`, `Options`, `Parameters`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -62,7 +61,7 @@ Use the fields in the `Options` tab to describe or modify the action of the func > -- Use the `Estimated cost` field to specify a positive number representing the estimated execution cost for the function, in units of cpu\_operator\_cost. If the function returns a set, this is the cost per returned row. +- Use the `Estimated cost` field to specify a positive number representing the estimated execution cost for the function, in units of cpu_operator_cost. If the function returns a set, this is the cost per returned row. 
- Use the `Estimated rows` field to specify a positive number giving the estimated number of rows that the query planner should expect the function to return. This is only allowed when the function is declared to return a set. The default assumption is 1000 rows. @@ -110,7 +109,7 @@ The following is an example of the sql command generated by selections made in t ![Create Function dialog - SQL tab](../../images/function_sql.png) -The example demonstrates creating an `edbspl` function named `emp_comp`. The function adds two columns (p\_sal and p\_comm), and then uses the result to compute a yearly salary, returning a NUMERIC value. +The example demonstrates creating an `edbspl` function named `emp_comp`. The function adds two columns (p_sal and p_comm), and then uses the result to compute a yearly salary, returning a NUMERIC value. - Click the `Info` button (i) to access online help.View context-sensitive help in the `Tabbed browser`, where a new tab displays the PostgreSQL core documentation. - Click the `Save` button to save work. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/15_language_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/15_language_dialog.mdx index 9d1849e8758..6d7de391baf 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/15_language_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/15_language_dialog.mdx @@ -4,7 +4,6 @@ title: "Language Dialog" - Use the CREATE LANGUAGE dialog to register a new procedural language. The `Language` dialog organizes the registration of a procedural language through the following dialog tabs: `General`, `Definition`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/16_materialized_view_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/16_materialized_view_dialog.mdx index 8b1ea0d5ef1..8ac532cb7a4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/16_materialized_view_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/16_materialized_view_dialog.mdx @@ -4,10 +4,9 @@ title: "Materialized View Dialog" - Use the `Materialized View` dialog to define a materialized view. A materialized view is a stored or cached view that contains the result set of a query. Use the REFRESH MATERIALIZED VIEW command to update the content of a materialized view. -The `Materialized View` dialog organizes the development of a materialized\_view through the following dialog tabs: `General`, `Definition`, `Storage`, `Parameter`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. +The `Materialized View` dialog organizes the development of a materialized_view through the following dialog tabs: `General`, `Definition`, `Storage`, `Parameter`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. ![Create Materialized View dialog - General tab](../../images/materialized_view_general.png) @@ -41,7 +40,7 @@ Click the `Parameter` tab to continue. Use the tabs nested inside the `Parameter` tab to specify VACUUM and ANALYZE thresholds; use the `Table` tab and the `Toast Table` tab to customize values for the table and the associated toast table. To change the default values: - Move the `Custom auto-vacuum?` switch to the `Yes` position to perform custom maintenance on the materialized view and to select values in the `Vacuum table`. 
The `Vacuum Table` provides default values for maintenance operations. -- Changing `Autovacuum enabled?` to `Not set` will reset autovacuum\_enabled. +- Changing `Autovacuum enabled?` to `Not set` will reset autovacuum_enabled. Click the `Security` tab to continue. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/17_package_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/17_package_dialog.mdx index 65d550b8330..64b8f79569e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/17_package_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/17_package_dialog.mdx @@ -4,7 +4,6 @@ title: "Package Dialog" - Use the `Package` dialog to create a (user-defined) package specification. The `Package` dialog organizes the management of a package through the following dialog tabs: `General`, `Header`, `Body`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/18_procedure_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/18_procedure_dialog.mdx index ff8f10cd5a4..b7284a798c8 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/18_procedure_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/18_procedure_dialog.mdx @@ -4,7 +4,6 @@ title: "Procedure Dialog" - Use the `Procedure` dialog to create a procedure; procedures are supported by PostgreSQL v11+ and EDB Postgres Advanced Server. The `Procedure` dialog allows you to implement options of the CREATE PROCEDURE command. 
The `Procedure` dialog organizes the development of a procedure through the following dialog tabs: `General`, *Definition*, `Options`, *Arguments*, *Parameters*, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -55,7 +54,7 @@ Use the fields in the `Options` tab to describe or modify the behavior of the pr - Move the `Security of definer?` switch to specify that the procedure is to be executed with the privileges of the user that created it. The default is `No`. -- Use the `Estimated cost` field to specify a positive number representing the estimated execution cost for the procedure, in units of cpu\_operator\_cost. If the procedure returns a set, this is the cost per returned row. +- Use the `Estimated cost` field to specify a positive number representing the estimated execution cost for the procedure, in units of cpu_operator_cost. If the procedure returns a set, this is the cost per returned row. - Move the `Leak proof?` switch to indicate whether the procedure has side effects — it reveals no information about its arguments other than by its return value. The default is `No`. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/19_schema_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/19_schema_dialog.mdx index 1847e3d1c1b..c9823d03051 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/19_schema_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/19_schema_dialog.mdx @@ -4,7 +4,6 @@ title: "Schema Dialog" - Use the `Schema` dialog to define a schema. A schema is the organizational workhorse of a database, similar to directories or namespaces. To create a schema, you must be a database superuser or have the CREATE privilege. 
The `Schema` dialog organizes the development of schema through the following dialog tabs: `General` and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/20_sequence_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/20_sequence_dialog.mdx index b73bdfeb928..393467f75a1 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/20_sequence_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/20_sequence_dialog.mdx @@ -4,7 +4,6 @@ title: "Sequence Dialog" - Use the `Sequence` dialog to create a sequence. A sequence generates unique values in a sequential order (not necessarily contiguous). The `Sequence` dialog organizes the development of a sequence through the following dialog tabs: `General`, *Definition*, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/21_synonym_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/21_synonym_dialog.mdx index 67efcd5bf83..1b8e80138d2 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/21_synonym_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/21_synonym_dialog.mdx @@ -4,7 +4,6 @@ title: "Synonym Dialog" - Use the `Synonym` dialog to substitute the name of a target object with a user-defined synonym. The `Synonym` dialog organizes the development of a synonym through the `General` tab. The `SQL` tab displays the SQL code generated by dialog selections. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/22_trigger_function_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/22_trigger_function_dialog.mdx index 09a339cf1e8..ec5f21b72ff 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/22_trigger_function_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/22_trigger_function_dialog.mdx @@ -4,8 +4,7 @@ title: "Trigger function Dialog" - -Use the `Trigger function` dialog to create or manage a trigger\_function. A trigger function defines the action that will be invoked when a trigger fires. +Use the `Trigger function` dialog to create or manage a trigger_function. A trigger function defines the action that will be invoked when a trigger fires. The `Trigger function` dialog organizes the development of a trigger function through the following dialog tabs: `General`, *Definition*, `Code`, *Options*, *Parameters* and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -57,7 +56,7 @@ Use the fields in the `Options` tab to describe or modify the action of the trig - Move the `Window?` switch to indicate that the trigger function is a window function rather than a plain function. The default is `No`. This is currently only useful for trigger functions written in C. -- Use the `Estimated cost` field to specify a positive number representing the estimated execution cost for the trigger function, in units of cpu\_operator\_cost. If the function returns a set, this is the cost per returned row. +- Use the `Estimated cost` field to specify a positive number representing the estimated execution cost for the trigger function, in units of cpu_operator_cost. If the function returns a set, this is the cost per returned row. 
- Use the `Estimated rows` field to specify a positive number giving the estimated number of rows that the query planner should expect the trigger function to return. This is only allowed when the function is declared to return a set. The default assumption is 1000 rows. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/23_type_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/23_type_dialog.mdx index 7b71f957253..f96a56b22d7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/23_type_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/23_type_dialog.mdx @@ -4,7 +4,6 @@ title: "Type Dialog" - Use the `Type` dialog to register a custom data type. The `Type` dialog organizes the development of a data type through the following dialog tabs: `General`, *Definition*, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -60,15 +59,15 @@ If you select `External`, the `Definition` tab displays the `External Type` pane On the `Required` tab: -- Use the drop-down listbox next to the `Input function` field to add an input\_function. The input\_function converts the type's external textual representation to the internal representation used by the operators and functions defined for the type. -- Use the drop-down listbox next to the `Output function` field to add an output\_function. The output\_function converts the type's internal representation used by the operators and functions defined for the type to the type's external textual representation. +- Use the drop-down listbox next to the `Input function` field to add an input_function. 
The input_function converts the type's external textual representation to the internal representation used by the operators and functions defined for the type. +- Use the drop-down listbox next to the `Output function` field to add an output_function. The output_function converts the type's internal representation used by the operators and functions defined for the type to the type's external textual representation. On the `Optional-1` tab: -- Use the drop-down listbox next to the optional `Receive Function` field to select a receive\_function. The optional receive\_function converts the type's external binary representation to the internal representation. If this function is not supplied, the type cannot participate in binary input. -- Use the drop-down listbox next to the optional `Send function` field to select a send\_function. The optional send\_function converts from the internal representation to the external binary representation. If this function is not supplied, the type cannot participate in binary output. -- Use the drop-down listbox next to the optional `Typmod in function` field tab to select a type\_modifier\_input\_function. -- Use the drop-down listbox next to the optional `Typmod out function` field tab to select a type\_modifier\_output\_function. It is allowed to omit the type\_modifier\_output\_function, in which case the default display format is the stored typmod integer value enclosed in parentheses. +- Use the drop-down listbox next to the optional `Receive Function` field to select a receive_function. The optional receive_function converts the type's external binary representation to the internal representation. If this function is not supplied, the type cannot participate in binary input. +- Use the drop-down listbox next to the optional `Send function` field to select a send_function. The optional send_function converts from the internal representation to the external binary representation. 
If this function is not supplied, the type cannot participate in binary output. +- Use the drop-down listbox next to the optional `Typmod in function` field tab to select a type_modifier_input_function. +- Use the drop-down listbox next to the optional `Typmod out function` field tab to select a type_modifier_output_function. It is allowed to omit the type_modifier_output_function, in which case the default display format is the stored typmod integer value enclosed in parentheses. - Use the optional `Internal length` to specify a value for internal representation. - Move the `Variable?` switch to specify the internal representation is of variable length (VARIABLE). The default is a fixed length positive integer. - Specify a default value in the optional `Default` field in cases where a column of the data type defaults to something other than the null value. Specify the default with the DEFAULT key word. (A default can be overridden by an explicit DEFAULT clause attached to a particular column.) @@ -93,7 +92,7 @@ If you select `Range` in the `Type` field, the `Definition` tab displays the `Ra - Use the drop-down listbox next to `Sub-type operator class` to use a non-default operator class. - Use the drop-down listbox next to `Collation` to use a non-default collation in the range's ordering if the sub-type is collatable. - Use the drop-down listbox next to `Canonical function` to convert range values to a canonical form. -- Use the drop-down listbox next to `Sub-type diff function` to select a user-defined subtype\_diff function. +- Use the drop-down listbox next to `Sub-type diff function` to select a user-defined subtype_diff function. 
If you select `Shell` in the `Type` field, the `Definition` tab displays the `Shell` panel: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/24_user_mapping_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/24_user_mapping_dialog.mdx index 9d584a492c1..9c63c6457b5 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/24_user_mapping_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/24_user_mapping_dialog.mdx @@ -4,7 +4,6 @@ title: "User Mapping Dialog" - Use the `User Mapping` dialog to define a new mapping of a user to a foreign server. The `User Mapping` dialog organizes the development of a user mapping through the following dialog tabs: `General` and `Options`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/25_view_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/25_view_dialog.mdx index 6023c48c5ed..8351d9098a8 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/25_view_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/25_view_dialog.mdx @@ -4,7 +4,6 @@ title: "View Dialog" - Use the `View` dialog to define a view. The view is not physically materialized; the query is executed each time the view is referenced in a query. The `View` dialog organizes the development of a View through the following dialog tabs: `General`, *Definition*, `Code` and *Security*". The `SQL` tab displays the SQL code generated by dialog selections. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/index.mdx index d79100946d3..0b29b8a24d8 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/11_managing_database_objects/index.mdx @@ -4,7 +4,6 @@ title: "Managing Database Objects" - PEM provides simple but powerful dialogs that you can use to design and create database objects. Each dialog contains a series of tabs that you use to describe the object that will be created by the dialog; the SQL tab displays the SQL command that the server will execute when creating the object. To access a dialog that allows you to create a database object, right-click on the object type in the Browser tree control, and select the `Create` option for that object. For example, to create a new cast, right-click on the `Casts` node, and select Create Cast... @@ -13,6 +12,6 @@ Contents:
-cast\_dialog collation\_dialog domain\_dialog domain\_constraint\_dialog event\_trigger\_dialog extension\_dialog foreign\_data\_wrapper\_dialog foreign\_server\_dialog foreign\_table\_dialog fts\_configuration\_dialog fts\_dictionary\_dialog fts\_parser\_dialog fts\_template\_dialog function\_dialog language\_dialog materialized\_view\_dialog package\_dialog procedure\_dialog schema\_dialog sequence\_dialog synonym\_dialog trigger\_function\_dialog type\_dialog user\_mapping\_dialog view\_dialog +cast_dialog collation_dialog domain_dialog domain_constraint_dialog event_trigger_dialog extension_dialog foreign_data_wrapper_dialog foreign_server_dialog foreign_table_dialog fts_configuration_dialog fts_dictionary_dialog fts_parser_dialog fts_template_dialog function_dialog language_dialog materialized_view_dialog package_dialog procedure_dialog schema_dialog sequence_dialog synonym_dialog trigger_function_dialog type_dialog user_mapping_dialog view_dialog
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/01_check_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/01_check_dialog.mdx index 90bdefbd687..c57efb14158 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/01_check_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/01_check_dialog.mdx @@ -4,7 +4,6 @@ title: "Check Dialog" - Use the `Check` dialog to define or modify a check constraint. A check constraint specifies an expression that produces a Boolean result that new or updated rows must satisfy for an insert or update operation to succeed. The `Check` dialog organizes the development of a check constraint through the `General` and `Definition` tabs. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/02_column_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/02_column_dialog.mdx index 057fff327df..9ac82206a72 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/02_column_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/02_column_dialog.mdx @@ -4,7 +4,6 @@ title: "Column Dialog" - Use the `Column` dialog to add a column to an existing table or modify a column definition. The `Column` dialog organizes the development of a column through the following dialog tabs: `General`, `Definition`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/03_compound_trigger_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/03_compound_trigger_dialog.mdx index fcfb8db8a47..885f5bde0ae 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/03_compound_trigger_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/03_compound_trigger_dialog.mdx @@ -4,7 +4,6 @@ title: "Compound Trigger Dialog" - Use the `Compound Trigger` dialog to create a compound trigger or modify an existing compound trigger. `Compound Trigger` is supported only for EPAS server 12 and above. A compound trigger executes a specified code when certain events occur. The `Compound Trigger` dialog organizes the development of a compound trigger through the following dialog tabs: `General`, `Events`, and `Code`. The SQL tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/04_exclusion_constraint_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/04_exclusion_constraint_dialog.mdx index 5887e94bc81..76d3d16acc1 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/04_exclusion_constraint_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/04_exclusion_constraint_dialog.mdx @@ -4,7 +4,6 @@ title: "Exclusion constraint Dialog" - Use the `Exclusion constraint` dialog to define or modify the behavior of an exclusion constraint. 
An exclusion constraint guarantees that if any two rows are compared on the specified column or expression (using the specified operator), at least one of the operator comparisons will return false or null. The `Exclusion constraint` dialog organizes the development of an exclusion constraint through the following dialog tabs: `General`, `Definition`, and `Columns`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/05_foreign_key_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/05_foreign_key_dialog.mdx index 6be0439a0c9..be99627e39a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/05_foreign_key_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/05_foreign_key_dialog.mdx @@ -4,7 +4,6 @@ title: "Foreign key Dialog" - Use the `Foreign key` dialog to specify the behavior of a foreign key constraint. A foreign key constraint maintains referential integrity between two tables. A foreign key constraint cannot be defined between a temporary table and a permanent table. The `Foreign key` dialog organizes the development of a foreign key constraint through the following dialog tabs: `General`, `Definition`, `Columns`, and `Action`. The `SQL` tab displays the SQL code generated by dialog selections. 
@@ -61,7 +60,7 @@ Use the drop-down listboxes on the `Action` tab to specify behavior related to t The supported actions are: | | | -|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | NO ACTION | Produce an error indicating that the deletion or update will create a foreign key constraint violation. If the constraint is deferred, this error will be produced at constraint check time if any referencing rows still exist. This is the default. | | RESTRICT | Throw an error indicating that the deletion or update would create a foreign key constraint violation. This is the same as NO ACTION except that the check is not deferrable. | | CASCADE | Delete any rows referencing the deleted row, or update the values of the referencing column(s) to the new values of the referenced columns, respectively. | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/06_index_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/06_index_dialog.mdx index 6ffae054da8..856f5198c92 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/06_index_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/06_index_dialog.mdx @@ -4,7 +4,6 @@ title: "Index Dialog" - Use the `Index` dialog to create an index on a specified table or materialized view. 
The `Index` dialog organizes the development of a index through the following dialog tabs: `General` and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/07_primary_key_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/07_primary_key_dialog.mdx index fda0778c1d6..6fdd7e92d00 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/07_primary_key_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/07_primary_key_dialog.mdx @@ -4,7 +4,6 @@ title: "Primary key Dialog" - Use the `Primary key` dialog to create or modify a primary key constraint. A primary key constraint indicates that a column, or group of columns, uniquely identifies rows in a table. This requires that the values in the selected column(s) be both unique and not null. The `Primary key` dialog organizes the development of a primary key constraint through the `General` and `Definition` tabs. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/08_rls_policy_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/08_rls_policy_dialog.mdx index 0a7607ef81d..417208ac0a0 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/08_rls_policy_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/08_rls_policy_dialog.mdx @@ -4,7 +4,6 @@ title: "RLS Policy Dialog" - Use the `RLS Policy` dialog to Create a Row Level Security Policy.
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/09_rule_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/09_rule_dialog.mdx index 713c26c81c2..9ee162ce279 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/09_rule_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/09_rule_dialog.mdx @@ -4,7 +4,6 @@ title: "Rule Dialog" - Use the `Rule` dialog to define or modify a rule for a specified table or view. A PostgreSQL rule allows you to define an additional action that will be performed when a SELECT, INSERT, UPDATE, or DELETE is performed against a table. The `Rule` dialog organizes the development of a rule through the `General`, `Definition`, `Condition`, `Commands` tabs. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/10_table_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/10_table_dialog.mdx index 157c582f431..debbccce287 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/10_table_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/10_table_dialog.mdx @@ -4,7 +4,6 @@ title: "Table Dialog" - Use the `Table` dialog to create or modify a table. The `Table` dialog organizes the development of a table through the following dialog tabs: `General`, *Columns*, `Constraints`, *Advanced*, `Parameter`, and `Security`. The `SQL` tab displays the SQL code generated by dialog selections. @@ -43,7 +42,7 @@ Click the `Constraints` tab to continue. Use the fields in the `Constraints` tab to provide a table or column constraint. 
Optional constraint clauses specify constraints (tests) that new or updated rows must satisfy for an `INSERT` or `UPDATE` operation to succeed. Select the appropriate constraint type by selecting one of the following tabs on the `Constraints` panel: | Tab Name | Constraint | -|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `Primary Key` | Provides a unique identifier for each row in the table. | | `Foreign Key` | Maintains referential integrity between two tables. | | `Check` | Requires data satisfies an expression or condition before insertion or modification. | @@ -123,7 +122,7 @@ Use the drop-down listboxes on the `Action` tab to specify behavior related to t The supported actions are: | | | -|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | NO ACTION | Produce an error indicating that the deletion or update will create a foreign key constraint violation. If the constraint is deferred, this error will be produced at constraint check time if any referencing rows still exist. This is the default. | | RESTRICT | Throw an error indicating that the deletion or update would create a foreign key constraint violation. 
This is the same as NO ACTION except that the check is not deferrable. | | CASCADE | Delete any rows referencing the deleted row, or update the values of the referencing column(s) to the new values of the referenced columns, respectively. | @@ -224,8 +223,8 @@ Use the fields in the `Advanced` tab to define advanced features for the table: - Move the `Force RLS Policy?` to the `Yes` position to force the policy on the owner of the table. - Use the drop-down listbox next to `Of type` to copy the table structure from the specified composite type. Please note that a typed table will be dropped if the type is dropped (with DROP TYPE ... CASCADE). - Use the `Fill Factor` field to specify a fill factor for the table. The fill factor for a table is a percentage between 10 and 100. 100 (complete packing) is the default. -- Use the `Toast tuple target` field to set toast\_tuple\_target storage parameter of the table. The toast\_tuple\_target value is in bytes and has minimum value of 128. This field will be enabled only for PostgreSQL version >= 11 -- Use the `Parallel workers` field to set parallel\_workers storage parameter of the table. The parallel\_workers sets the number of workers that should be used to assist a parallel scan of the table. This field will be enabled only for PostgreSQL version >= 9.6 +- Use the `Toast tuple target` field to set toast_tuple_target storage parameter of the table. The toast_tuple_target value is in bytes and has minimum value of 128. This field will be enabled only for PostgreSQL version >= 11 +- Use the `Parallel workers` field to set parallel_workers storage parameter of the table. The parallel_workers sets the number of workers that should be used to assist a parallel scan of the table. This field will be enabled only for PostgreSQL version >= 9.6 - Move the `Has OIDs?` switch to the `Yes` position to specify that each row within a table has a system-assigned object identifier. The default is `No`. 
- Move the `Unlogged?` switch to the `Yes` position to disable logging for the table. Data written to an unlogged table is not written to the write-ahead log. Any indexes created on an unlogged table are automatically unlogged as well. The default is `No`. @@ -246,7 +245,7 @@ Click the `Partition` tab to continue. Use the fields in the `partition` tab to create the partitions for the table: -- Select a partition type from the `Partition Type` selection box. There are 3 options available; Range, List and Hash. Hash option will only enable for PostgreSQL version >= 11. +- Select a partition type from the `Partition Type` selection box. There are 3 options available; Range, List and Hash. Hash option will only enable for PostgreSQL version >= 11. Use the `Partition Keys` panel to define the partition keys. Click the `Add` icon (+) to add each partition keys selection: @@ -280,7 +279,7 @@ Click the `Parameter` tab to continue. Use the tabs nested inside the `Parameter` tab to specify VACUUM and ANALYZE thresholds; use the `Table` tab and the `Toast Table` tab to customize values for the table and the associated toast table: - Move the `Custom auto-vacuum?` switch to the `Yes` position to perform custom maintenance on the table and to select values in the `Vacuum table`. The `Vacuum Table` provides default values for maintenance operations. -- Changing `Autovacuum enabled?` to `Not set` will reset autovacuum\_enabled. +- Changing `Autovacuum enabled?` to `Not set` will reset autovacuum_enabled. Provide a custom value in the `Value` column for each metric listed in the `Label` column. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/11_trigger_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/11_trigger_dialog.mdx index 612b153ce20..90c87d4ca48 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/11_trigger_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/11_trigger_dialog.mdx @@ -4,7 +4,6 @@ title: "Trigger Dialog" - Use the `Trigger` dialog to create a trigger or modify an existing trigger. A trigger executes a specified function when certain events occur. The `Trigger` dialog organizes the development of a trigger through the following dialog tabs: `General`, *Definition*, `Events`, and `Code`. The `SQL` tab displays the SQL code generated by dialog selections. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/12_unique_constraint_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/12_unique_constraint_dialog.mdx index a72a6a7585e..b19e923bdf7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/12_unique_constraint_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/12_unique_constraint_dialog.mdx @@ -4,7 +4,6 @@ title: "Unique Constraint Dialog" - Use the `Unique constraint` dialog to define a unique constraint for a specified table. Unique constraints ensure that the data contained in a column, or a group of columns, is unique among all the rows in the table. The `Unique constraint` dialog organizes the development of a unique constraint through the following dialog tabs: `General` and `Definition`. The `SQL` tab displays the SQL code generated by dialog selections. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/index.mdx index 0486c7cec51..79110c8d1af 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/12_modifying_tables/index.mdx @@ -4,7 +4,6 @@ title: "Creating or Modifying a Table" - PEM provides dialogs that allow you to modify all table properties and attributes. To access a dialog that allows you to create a database object, right-click on the object type in the `Browser` tree control, and select the `Create` option for that object. For example, to create a new table, Select a database from the tree control, select the schema under the database, right-click on the *Tables* node, and select *Create Table...* @@ -13,6 +12,6 @@ Contents:
-check\_dialog column\_dialog compound\_trigger\_dialog exclusion\_constraint\_dialog foreign\_key\_dialog index\_dialog primary\_key\_dialog rls\_policy\_dialog rule\_dialog table\_dialog trigger\_dialog unique\_constraint\_dialog +check_dialog column_dialog compound_trigger_dialog exclusion_constraint_dialog foreign_key_dialog index_dialog primary_key_dialog rls_policy_dialog rule_dialog table_dialog trigger_dialog unique_constraint_dialog
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/index.mdx index 37eb9b58161..a984ba3f5d3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/05_toc_pem_management_basics/index.mdx @@ -4,7 +4,6 @@ title: "Management Basics" - PEM provides a graphical interface that you can use to simplify management of your Postgres servers and the objects that reside on them. The Grant Wizard simplifies the task of privilege management; to open the Grant Wizard, highlight the name of a server, database, or schema in the PEM client tree control, and select `Grant Wizard...` from the `Tools` menu. @@ -13,7 +12,7 @@ Contents:
-grant\_wizard +grant_wizard
@@ -23,7 +22,7 @@ Contents:
-add\_restore\_point\_dialog import\_export\_data maintenance storage\_manager +add_restore_point_dialog import_export_data maintenance storage_manager
@@ -33,7 +32,7 @@ Contents:
-backup\_dialog backup\_globals\_dialog backup\_server\_dialog restore\_dialog +backup_dialog backup_globals_dialog backup_server_dialog restore_dialog
@@ -43,6 +42,6 @@ Contents:
-managing\_cluster\_objects managing\_database\_objects modifying\_tables +managing_cluster_objects managing_database_objects modifying_tables
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/01_managing_bart_prerequisites.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/01_managing_bart_prerequisites.mdx index f1599b24af2..d0ab64476bd 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/01_managing_bart_prerequisites.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/01_managing_bart_prerequisites.mdx @@ -4,10 +4,9 @@ title: "Prerequisites for managing BART" - - Before adding a BART server to the PEM console, you must manually install and configure BART on the BART host. For more information about installing and configuring BART, please see the `BART Installation Guide` available at: - + [https://www.enterprisedb.com/docs](/bart/latest/bart_inst/) - Before associating a database server with a BART server, you must install SSH on the database server and the BART server. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/02_configuring_bart_server.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/02_configuring_bart_server.mdx index cf2609d2113..2a61bbc9530 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/02_configuring_bart_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/02_configuring_bart_server.mdx @@ -4,7 +4,6 @@ title: "Configuring a BART Server" - You can use the `Create–BART server` dialog to register an existing BART server with the PEM server. To access the dialog, right-click on the `BART Servers` node and select `Create-BART Server`. ![Create-BART server dialog - General tab](../images/create_BART_server_general.png) @@ -25,7 +24,7 @@ Use the fields on the `General` tab to describe the general properties of the BA - Use the `pg_basebackup_path` field to specify the path to the `pg_basebackup` utility. 
-- Use the `Xlog/WAL` method field to specify how the transaction log should be collected during the execution of pg\_basebackup. The default option is `fetch`; it specifies that the transaction log files will be collected after the backup has completed. Set the `Xlog` method to `stream` to stream the transaction log in parallel with the full base backup creation. If streaming is used, the `max_wal_senders` configuration parameter in the `postgresql.conf` file for affected database servers must account for an additional session for the streaming of the transaction log (the setting must be a minimum of 2). +- Use the `Xlog/WAL` method field to specify how the transaction log should be collected during the execution of pg_basebackup. The default option is `fetch`; it specifies that the transaction log files will be collected after the backup has completed. Set the `Xlog` method to `stream` to stream the transaction log in parallel with the full base backup creation. If streaming is used, the `max_wal_senders` configuration parameter in the `postgresql.conf` file for affected database servers must account for an additional session for the streaming of the transaction log (the setting must be a minimum of 2). 
For more information about Xlog method, see: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/03_associating_bart_server_with_database_server.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/03_associating_bart_server_with_database_server.mdx index c43cad21fad..3e0c5ce563e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/03_associating_bart_server_with_database_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/03_associating_bart_server_with_database_server.mdx @@ -4,13 +4,12 @@ title: "Associating the BART Server with a Database Server" - After configuring the BART server, you need to associate it with the database server whose backup you want to manage with BART. You can do one of the following: - Use the PEM console to modify the properties of an existing monitored database server to map it to the newly configured BART server. - Use the PEM console to create a new monitored database server, and map it to the newly configured BART server. -To map the BART server to a new PEM database server, right-click the `PEM Server Directory` node and select `Create` > `Server`. Enter the details on all the generic tabs and then enter the BART-specific details on the `BART` tab. +To map the BART server to a new PEM database server, right-click the `PEM Server Directory` node and select `Create` > `Server`. Enter the details on all the generic tabs and then enter the BART-specific details on the `BART` tab. ![Create Server dialog (BART - General tab)](../images/create_server_bart_general.png) @@ -38,8 +37,8 @@ Use the fields on the `Misc` tab to describe the miscellaneous properties of the - Use the `Override default configuration?` Switch to specify if you want to override the BART server configurations with the specific database server configurations. 
- Use the `Xlog` method to specify how the transaction log should be collected during the execution of `pg_basebackup`. - Use the `Retention policy` field to specify the retention policy for the backup. This determines when an active backup should be marked as obsolete, and hence, be a candidate for deletion. You can specify the retention policy in terms of number of backup or in terms of duration (days, weeks, or months). -- Use the `WAL compression` switch to specify if you want to compress the archived Xlog/WAL files in Gzip format. To enable WAL compression, the gzip compression program must be present in the BART user account’s PATH. The wal\_compression setting must not be enabled for those database servers where you need to take incremental backups. -- Use the `Copy WALs during restore` field to specify how the archived WAL files are collected when invoking the RESTORE operation. Set to enabled to copy the archived WAL files from the BART backup catalog to the <restore\_path>/archived\_wals directory prior to the database server archive recovery. Set to disabled to retrieve the archived WAL files directly from the BART backup catalog during the database server archive recovery. +- Use the `WAL compression` switch to specify if you want to compress the archived Xlog/WAL files in Gzip format. To enable WAL compression, the gzip compression program must be present in the BART user account’s PATH. The wal_compression setting must not be enabled for those database servers where you need to take incremental backups. +- Use the `Copy WALs during restore` field to specify how the archived WAL files are collected when invoking the RESTORE operation. Set to enabled to copy the archived WAL files from the BART backup catalog to the <restore_path>/archived_wals directory prior to the database server archive recovery. Set to disabled to retrieve the archived WAL files directly from the BART backup catalog during the database server archive recovery. 
- Use the `Thread count` field to specify the number of threads to copy the blocks. You must set `thread count` to `1` if you want to take a backup with the `pg_basebackup` utility. - Use the `Batch size` field to specify the number of blocks of memory used for copying modified blocks, applicable only for incremental backups. - Use the `Scan interval` field to specify the number of seconds after which the WAL scanner should scan the new WAL files. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/04_viewing_bart_dashboard.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/04_viewing_bart_dashboard.mdx index 49831bc2ffa..b299366562f 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/04_viewing_bart_dashboard.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/04_viewing_bart_dashboard.mdx @@ -4,7 +4,6 @@ title: "Viewing the BART Server Details on a PEM Dashboard" - Once the BART server is associated with the database server, you can see the entire backup and restore related details for that particular BART server on the PEM Dashboard. You can also perform operations such as restoration or deletion of a backup that is listed on the dashboard. ![BART Dashboard](../images/bart_backup_dashboard.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/05_scheduling_bart_backups.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/05_scheduling_bart_backups.mdx index ebba9e08737..f56792cfd8b 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/05_scheduling_bart_backups.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/05_scheduling_bart_backups.mdx @@ -4,7 +4,6 @@ title: "Scheduling BART Backups" - To schedule a backup using BART, select `Schedule Backup` under `Tools` menu. 
You can see a list of scheduled backups with details such as `Logs`, *Last result*, `Database server`, *Last backup name*, `Started on`, *Type*, `Parent`, *Format*, `Verify checksum?`, and `Use pg_basebackup?`. Click the Add icon (+) to add a schedule for the backup. Enter the details in the schedule definition dialog: ![Schedule Backups dialog - General tab](../images/BART_backup_scheduler_general.png) @@ -22,7 +21,7 @@ Use the fields on the `General` tab to describe the general properties of the ba - Use the `MBM scan timeout` field to specify the number of seconds to wait for required MBM files before timing out. - Use the `Checksum algorithm` field to specify checksum algorithm for MBM files of the backup. - Use the `Verify checksum` field to specify if you want the application to verify the checksum of the backup. -- Use the `pg_basebackup` field to specify if the pg\_basebackup utility should be used for the backup. Typically, pg\_basebackup utility is used only for backing up the replica servers since it cannot be used for incremental backups. +- Use the `pg_basebackup` field to specify if the pg_basebackup utility should be used for the backup. Typically, pg_basebackup utility is used only for backing up the replica servers since it cannot be used for incremental backups. 
![Schedule Backups dialog - Schedule General tab](../images/BART_backup_scheduler_schedule_general.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/06_scheduling_bart_obsolete_backups_deletion.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/06_scheduling_bart_obsolete_backups_deletion.mdx index e9ba167b457..3ddecaedd73 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/06_scheduling_bart_obsolete_backups_deletion.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/06_scheduling_bart_obsolete_backups_deletion.mdx @@ -4,7 +4,6 @@ title: "Scheduling BART Obsolete Backups Deletion" - Use the `Schedule Obsolete Backup Deletion` dialog to schedule or modify a BART obsolete backup deletion. Use context menu from database server where BART has been configured. ![Schedule Obsolete Backup dialog - General tab](../images/BART_obsolete_backup_scheduler_general.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/07_bart_backup_dialog.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/07_bart_backup_dialog.mdx index efbffeb8297..3eb3898e335 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/07_bart_backup_dialog.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/07_bart_backup_dialog.mdx @@ -4,7 +4,6 @@ title: "BART Backup Dialog" - Use the `BART Backup` dialog to take ad-hoc backups using BART. This dialog can be opened using `Backup...` context menu of BART server node and `Backup...` sub menu of `BART` context menu of Database server node. ![BART Backup dialog - General tab](../images/bart_backup_dialog_general.png) @@ -21,7 +20,7 @@ Use the fields on the `General` tab to describe the general properties of the ba - Use the `Thread count` field to specify the number of threads that will copy the blocks. 
- Use the `Checksum algorithm` field to specify checksum algorithm for MBM files of the backup. - Use the `Verify checksum` field to specify if you want the application to verify the checksum of the backup. -- Use the `pg_basebackup` field to specify if the pg\_basebackup utility should be used for the backup. Typically, pg\_basebackup utility is used only for backing up the replica servers since it cannot be used for incremental backups. +- Use the `pg_basebackup` field to specify if the pg_basebackup utility should be used for the backup. Typically, pg_basebackup utility is used only for backing up the replica servers since it cannot be used for incremental backups. ![BART Backup dialog - Notifications tab](../images/bart_backup_dialog_notifications.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/08_restoring_bart_backups.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/08_restoring_bart_backups.mdx index d4e7beea764..26e1af500a0 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/08_restoring_bart_backups.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/08_restoring_bart_backups.mdx @@ -4,7 +4,6 @@ title: "Restoring BART Backups" - You can restore the backups that you have earlier created using BART server on a target remote host. When you select a particular BART server, all the associated backups are listed in the Dashboard under `Initiated Server Backups`. To restore a backup, click the `Restore` icon next to the backup that you want to restore. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/index.mdx index 28df15307fd..49150cbf9a7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/06_toc_pem_bart_management/index.mdx @@ -4,9 +4,6 @@ title: "Managing a BART Server" - - - Postgres Enterprise Manager (PEM) is designed to assist database administrators, system architects, and performance analysts when administering, monitoring, and tuning PostgreSQL and Advanced Server database servers. The EDB Backup and Recovery Tool (BART) is an administrative utility providing simplified backup and recovery management for multiple local or remote EDB Postgres Advanced Server and PostgreSQL database servers. For more information about BART, please visit the EnterpriseDB website at: @@ -19,7 +16,7 @@ Before you manage a BART server through PEM console, you must ensure that your s
-managing\_bart\_prerequisites +managing_bart_prerequisites
@@ -27,7 +24,7 @@ You must add a BART server to the PEM console and then associate that BART serve
-configuring\_bart\_server associating\_bart\_server\_with\_database\_server +configuring_bart_server associating_bart_server_with_database_server
@@ -35,6 +32,6 @@ After you associate the BART server with the database server, you will be able t
-viewing\_bart\_dashboard scheduling\_bart\_backups scheduling\_bart\_obsolete\_backups\_deletion bart\_backup\_dialog restoring\_bart\_backups +viewing_bart_dashboard scheduling_bart_backups scheduling_bart_obsolete_backups_deletion bart_backup_dialog restoring_bart_backups
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/01_sp_installing_sql_profiler.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/01_sp_installing_sql_profiler.mdx index b6d1d72df1e..fadade589db 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/01_sp_installing_sql_profiler.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/01_sp_installing_sql_profiler.mdx @@ -4,7 +4,6 @@ title: "Installing SQL Profiler" - SQL Profiler allows a database superuser to locate and optimize poorly-running SQL code. Users of Microsoft SQL Server’s Profiler will find PEM’s SQL Profiler very similar in operation and capabilities. SQL Profiler is installed with each Advanced Server instance; if you are using PostgreSQL, you must download the SQL Profiler installer or packages and install the SQL Profiler product into each managed database instance you wish to profile. SQL Profiler is officially supported only on the EnterpriseDB distributions of PostgreSQL version 9.4 or above and Advanced Server version 9.4 or above. The plugin is distributed via StackBuilder, or is available from the [EnterpriseDB website](https://www.enterprisedb.com/advanced-downloads) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/02_sp_configuring_sql_profiler.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/02_sp_configuring_sql_profiler.mdx index 169410483dc..025ea262f8d 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/02_sp_configuring_sql_profiler.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/02_sp_configuring_sql_profiler.mdx @@ -4,7 +4,6 @@ title: "Configuring SQL Profiler" - The SQL Profiler plugin is not automatically enabled when the installation process completes. 
This allows you to restart the server at a convenient time, and prevents the plugin from being loaded unnecessarily on systems where it is not required on a continual basis. Use the following steps to enable the plugin for each database monitored by SQL Profiler: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/03_sp_create_new_trace.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/03_sp_create_new_trace.mdx index 0a77b59c111..3bcf394c6e9 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/03_sp_create_new_trace.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/03_sp_create_new_trace.mdx @@ -4,12 +4,11 @@ title: "Using SQL Profiler" - You can use SQL Profiler to create and store up to 15 named traces; use menu options to create and manage traces. ## Creating a Trace -You can use the Create trace... dialog to define a SQL Trace for any database on which SQL Profiler has been installed and configured. [installed and configured](../07_toc_pem_sql_profiler/#sp_installing_sql_profiler). To access the dialog, highlight the name of the database in the PEM client tree control; navigate through the `Management` menu to the `SQL Profiler` pull-aside menu, and select `Create trace...`. +You can use the Create trace... dialog to define a SQL Trace for any database on which SQL Profiler has been installed and configured. [installed and configured](01_sp_installing_sql_profiler/#sp_installing_sql_profiler). To access the dialog, highlight the name of the database in the PEM client tree control; navigate through the `Management` menu to the `SQL Profiler` pull-aside menu, and select `Create trace...`. 
![Create trace dialog - Trace options tab](../images/sp_create_new_trace.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/04_sp_index_advisor.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/04_sp_index_advisor.mdx index 3891e3487f2..56d95bf5a5a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/04_sp_index_advisor.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/04_sp_index_advisor.mdx @@ -4,12 +4,11 @@ title: "Using Index Advisor" - Index Advisor helps you determine the application tables (and columns) on which you should create common B-tree type indexes. This can reduce the execution cost of queries you expect to use on your tables. Index Advisor comes pre-installed with EDB Postgres (R) Advanced Server. Index Advisor works with Advanced Server's query planner by creating "hypothetical indexes" for the query planner to use to calculate execution costs if such indexes were available. Before using Index Advisor, you must: -1. Modify the postgresql.conf file on each Advanced Server host, adding the index\_advisor library to the shared\_preload\_libraries parameter. +1. Modify the postgresql.conf file on each Advanced Server host, adding the index_advisor library to the shared_preload_libraries parameter. 2. Install the Index Advisor contrib module. To install the module, use the psql client or PEM Query Tool to connect to the database, and invoke the following command: @@ -29,6 +28,6 @@ Note
-It is recommended that you disable the index advisor while using the pg\_dump functionality. +It is recommended that you disable the index advisor while using the pg_dump functionality. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/05_sp_sql_profiler_tab.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/05_sp_sql_profiler_tab.mdx index afb93fa2b5d..129eec290ea 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/05_sp_sql_profiler_tab.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/05_sp_sql_profiler_tab.mdx @@ -4,7 +4,6 @@ title: "The SQL Profiler Tab" - ## Toolbar Options Toolbar options on the SQL Profiler tab allow you to define new traces, start or stop existing traces, open and search through previous traces, and filter trace results. @@ -14,15 +13,15 @@ Toolbar options on the SQL Profiler tab allow you to define new traces, start or Use the following options to manage your SQL Profiler traces: | Option | Action | Shortcut | -|------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| -| `Menu` | Use options accessed through the drop\_down menu icon to manage SQL Profiler traces. | Accesskey + O | +| ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| `Menu` | Use options accessed through the drop_down menu icon to manage SQL Profiler traces. | Accesskey + O | | `Start Trace` | Select the `Start Trace` icon to start a new trace, using the attributes (user names, database names, comments, etc) that were defined for the trace currently displayed in the SQL Profiler dialog. 
| Accesskey + S | | `Stop Trace` | Select the `Stop Trace` icon to stop an executing trace. | Accesskey + Q | | `Refresh Trace` | Select the `Refresh Trace` icon to update the display to include any recent changes to an active trace. | Accesskey + R | | `Clear Trace` | Select the `Clear Trace` icon to delete the trace and close the SQL Profiler window. | Accesskey + C | | `Filter` | Select the `Filter` icon to define a new filter, or apply an existing filter to the trace. | Accesskey + T | | `Information` | Select the information icon to view the properties of the trace displayed in the SQL Profiler window. | Accesskey + P | -| `Index Advisor` | Select the `Index Advisor` icon to open the [PEM Index Advisor](../07_toc_pem_sql_profiler/#sp_index_advisor). \| Ac | cesskey + I | +| `Index Advisor` | Select the `Index Advisor` icon to open the [PEM Index Advisor](04_sp_index_advisor/#sp_index_advisor). \| Ac | cesskey + I | | `Download Trace` | Use options accessed through the `Download Trace` menu to download a CSV file that contains the trace events shown on the current page or the complete set of trace data. | Accesskey + X | | `Column Picker` | Click the `Column Picker` icon to choose the columns to be displayed in below table. | Accesskey + W | @@ -56,8 +55,8 @@ The Query/Metrics pane is located in the lower-left corner of the SQL Profiler w - The `Metrics` tab displays detailed statistical information about the execution of the query. 
The table below describes the metrics that are displayed in the Metrics dialog; the percentages listed describe the percentage of the total quantity of the parameter that is attributed to the selected SQL command: | **Property** | **Description** | -|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Executed (\#) | The number of times that the selected SQL command executed. | +| ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Executed (#) | The number of times that the selected SQL command executed. | | Execution (%) | The percentage of the execution count that the SQL command represents. For example if the trace profiles 4 SQL commands, each command will represent 25% of the trace execution %. | | Duration (%) | The percentage of the total trace time consumed by the highlighted SQL Command. | | Rows updated (%) | The percentage of the rows updated during the trace that were updated by the selected SQL command. | @@ -85,4 +84,4 @@ The Query/Metrics pane is located in the lower-left corner of the SQL Profiler w The Graphical or Text-based explain pane displays one of two representations of the query execution plan for the selected query. - Select the Text-based Plan tab to display the execution plan for the currently highlighted event in text format: -- Select the Graphical-based Plan tab to display a graphical interpretation of the execution plan of the highlighted query. For more information about interpreting the graphical query plan, see [Interpreting the Graphical Query Plan](../08_toc_pem_developer_tools/#pem_interpreting_graphical_query). 
+- Select the Graphical-based Plan tab to display a graphical interpretation of the execution plan of the highlighted query. For more information about interpreting the graphical query plan, see [Interpreting the Graphical Query Plan](../08_toc_pem_developer_tools/03_pem_interpreting_graphical_query/#pem_interpreting_graphical_query). diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/index.mdx index 21d10f5af7b..60648bb6637 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/07_toc_pem_sql_profiler/index.mdx @@ -4,14 +4,13 @@ title: "SQL Profiler" - -SQL Profiler captures statistical information and query execution plans for SQL statements executed during a trace session. You can use the information stored by SQL Profiler to identify performance issues. Before using SQL Profiler, you must [install and configure SQL Profiler](07_toc_pem_sql_profiler/#sp_installing_sql_profiler) on each database you intend to profile. +SQL Profiler captures statistical information and query execution plans for SQL statements executed during a trace session. You can use the information stored by SQL Profiler to identify performance issues. Before using SQL Profiler, you must [install and configure SQL Profiler](01_sp_installing_sql_profiler/#sp_installing_sql_profiler) on each database you intend to profile. Contents:
-sp\_installing\_sql\_profiler sp\_configuring\_sql\_profiler sp\_create\_new\_trace sp\_index\_advisor sp\_sql\_profiler\_tab +sp_installing_sql_profiler sp_configuring_sql_profiler sp_create_new_trace sp_index_advisor sp_sql_profiler_tab
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/01_debugger.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/01_debugger.mdx index d5af9b3d042..3960eeef092 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/01_debugger.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/01_debugger.mdx @@ -4,14 +4,13 @@ title: "pgAdmin Debugger" - ![Debugger page - Parameters tab](../images/debug_main.png) The debugger may be used to debug PL/pgSQL functions in PostgreSQL, as well as EDB-SPL functions, stored procedures and packages in Advanced Server. The Debugger is available as an extension for your PostgreSQL installation, and is distributed as part of Advanced Server. You must have superuser privileges to use the debugger. Before using the debugger, you must modify the `postgresql.conf` file, adding the server-side debugger components to the the value of the `shared_preload_libraries` parameter: -> shared\_preload\_libraries = '$libdir/`other_libraries`/plugin\_debugger' +> shared_preload_libraries = '$libdir/`other_libraries`/plugin_debugger' After modifying the `shared_preload_libraries` parameter, restart the server to apply the changes. @@ -53,7 +52,14 @@ The main debugger window consists of two panels and a context-sensitive toolbar. ![Debugger navigation toolbar](../images/debug_toolbar.png) -
OptionAction
Step into

Click the Step into icon to execute the currently highlighted line of code.

Step over

Click the Step over icon to execute a line of code, stepping over any sub-functions invoked by the code.

The sub-function executes, but is not debugged unless it contains a breakpoint.
Continue/Start

Click the Continue/Start icon to execute the highlighted code, and continue until the program

encounters a breakpoint or completes.
Toggle breakpoint

Use the Toggle breakpoint icon to enable or disable a breakpoint (without removing the breakpoint).

Clear all breakpoints

Click the Clear all breakpoints icon to remove all breakpoints from the program.

Stop

Click the Stop icon to halt the execution of a program.

+| Option | Action | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `Step into` | Click the `Step into` icon to execute the currently highlighted line of code.
| `Step over` | Click the `Step over` icon to execute a line of code, stepping over any sub-functions invoked by the code. The sub-function executes, but is not debugged unless it contains a breakpoint. | +| `Continue/Start` | Click the `Continue/Start` icon to execute the highlighted code, and continue until the program encounters a breakpoint or completes. | +| `Toggle breakpoint` | Use the `Toggle breakpoint` icon to enable or disable a breakpoint (without removing the breakpoint).
| +| `Clear all breakpoints` | Click the `Clear all breakpoints` icon to remove all breakpoints from the program.
| +| `Stop` | Click the `Stop` icon to halt the execution of a program.
| The top panel of the debugger window displays the program body; click in the grey margin next to a line number to add a breakpoint. The highlighted line in the top panel is the line that is about to execute. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/02_query_tool.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/02_query_tool.mdx index 6969c315a91..9a19c67e9f2 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/02_query_tool.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/02_query_tool.mdx @@ -4,12 +4,11 @@ title: "Query Tool" - The Query Tool is a powerful, feature-rich environment that allows you to execute arbitrary SQL commands and review the result set. You can access the Query Tool via the `Query Tool` menu option on the `Tools` menu, or through the context menu of select nodes of the Browser tree control. The Query Tool allows you to: - Issue ad-hoc SQL queries. - Execute arbitrary SQL commands. -- Edit the result set of a SELECT query if it is [updatable](../08_toc_pem_developer_tools/#updatable-result-set). +- Edit the result set of a SELECT query if it is [updatable](#updatable-result-set). - Displays current connection and transaction status as configured by the user. - Save the data displayed in the output panel to a CSV file. - Review the execution plan of a SQL statement in either a text, a graphical format or a table format (similar to ). @@ -26,13 +25,32 @@ The Query Tool features two panels: **The Query Tool Toolbar** -The `Query Tool` toolbar uses context-sensitive icons that provide shortcuts to frequently performed tasks. If an icon is highlighted, the option is enabled; if the icon is grayed-out, the task is disabled. Please note that disabled icons may support functionality accessed via the [data editor](../08_toc_pem_developer_tools/04_editgrid/#editgrid). 
+The `Query Tool` toolbar uses context-sensitive icons that provide shortcuts to frequently performed tasks. If an icon is highlighted, the option is enabled; if the icon is grayed-out, the task is disabled. Please note that disabled icons may support functionality accessed via the [data editor](04_editgrid/#editgrid). ![Query Tool toolbar](../images/query_toolbar.png) Hover over an icon to display a tooltip that describes the icon's functionality: -
IconBehaviorShortcut
Open FileClick the Open File icon to display a previously saved query in the SQL Editor.Accesskey + O
Save

Click the Save icon to perform a quick-save of a previously saved query, or to access the Save menu:

  • Select Save to save the selected content of the SQL Editor panel in a file.
  • Select Save As to open a new browser dialog and specify a new location to which to save the selected content of the SQL Editor panel.
Accesskey + S
Find

Use the Find menu to search, replace, or navigate the code displayed in the SQL Editor:

  • Select Find to provide a search target, and search the SQL Editor contents.
  • Select Find next to locate the next occurrence of the search target.
  • Select Find previous to move to the last occurrence of the search target.
  • Select Pesistent find to identify all occurrences of the search target within the editor.
  • Select Replace to locate and replace (with prompting) individual occurrences of the target.
  • Select Replace all to locate and replace all occurrences of the target within the editor.
  • Select Jump to navigate to the next occurrence of the search target.

Cmd+F

Cmd+G

Cmd+Shift+G

Cmd+Shift+F

Alt+G

CopyClick the Copy icon to copy the content that is currently highlighted in the Data Output panel. when in View/Edit data mode.Accesskey + C
PasteClick the Paste icon to paste a previously row into a new row when in View/Edit data mode.Accesskey + P
DeleteClick the Delete icon to mark the selected rows for delete when in View/Edit data mode.Accesskey + D
Edit

Use options on the Edit menu to access text editing tools; the options operate on the text displayed in the SQL Editor panel when in Query Tool mode:

  • Select Indent Selection to indent the currently selected text.
  • Select Unindent Selection to remove indentation from the currently selected text.
  • Select Inline Comment Selection to enclose any lines that contain the selection in SQL style comment notation.
  • Select Inline Uncomment Selection to remove SQL style comment notation from the selected line.
  • Select Block Comment to enclose all lines that contain the selection in C style comment notation. This option acts as a toggle.

Tab

Shift+Tab

Cmd+/

Cmd+.

Shift+Cmd+/

Filter

Click the Filter icon to set filtering and sorting criteria for the data when in View/Edit data mode. Click the down arrow to access other filtering and sorting options:

  • Click Sort/Filter to open the sorting and filtering dialogue.
  • Click Filter by Selection to show only the rows containing the values in the selected cells.
  • Click Exclude by Selection to show only the rows that do not contain the values in the selected cells.
  • Click Remove Sort/Filter to remove any previously selected sort or filtering options.
Accesskey + F
Limit SelectorSelect a value in the Limit Selector to limit the size of the dataset to a number of rows.Accesskey + R
StopClick the Stop icon to cancel the execution of the currently running query.Accesskey + Q
Execute/Refresh

Click the Execute/Refresh icon to either execute or refresh the query highlighted in the SQL editor panel. Click the down arrow to access other execution options:

  • Add a check next to Auto-Rollback to instruct the server to automatically roll back a transaction if an error occurs during the transaction.
  • Add a check next to Auto-Commit to instruct the server to automatically commit each transaction. Any changes made by the transaction will be visible to others, and durable in the event of a crash.
F5
Explain
Click the Explain icon to view an explanation plan for the current query. The result of the

EXPLAIN is displayed graphically on the Explain tab of the output panel, and in text form on the Data Output tab.

F7
Explain analyze

Click the Explain analyze icon to invoke an EXPLAIN ANALYZE command on the current query.

Navigate through the Explain Options menu to select options for the EXPLAIN command:

  • Select Verbose to display additional information regarding the query plan.
  • Select Costs to include information on the estimated startup and total cost of each plan node, as well as the estimated number of rows and the estimated width of each row.
  • Select Buffers to include information on buffer usage.
  • Select Timing to include information about the startup time and the amount of time spent in each node of the query.
Shift+F7
CommitClick the Commit icon to commit the transaction.Shift+CTRL+M
RollbackClick the Rollback icon to rollback the transaction.Shift+CTRL+R
Clear

Use options on the Clear drop-down menu to erase display contents:

  • Select Clear Query Window to erase the content of the SQL Editor panel.
  • Select Clear History to erase the content of the History tab.
Accesskey + L
Download as CSVClick the Download as CSV icon to download the result set of the current query to a comma-separated list. You can specify the CSV settings through Preferences -> SQL Editor -> CSV output dialogue.F8
MacrosClick the Macros icon to manage the macros. You can create, edit or clear the macros through Manage Macros option.
+| Icon | Behavior | Shortcut | +| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | +| `Open File` | Click the `Open File` icon to display a previously saved query in the SQL Editor. | Accesskey + O | +| `Save` | Click the `Save` icon to perform a quick-save of a previously saved query, or to access the `Save` menu:
- Select `Save` to save the selected content of the SQL Editor panel in a file.
- Select `Save As` to open a new browser dialog and specify a new location to which to save the selected content of the SQL Editor panel.
| Accesskey + S | +| `Find` | Use the `Find` menu to search, replace, or navigate the code displayed in the SQL Editor:
- Select `Find` to provide a search target, and search the SQL Editor contents.
- Select `Find next` to locate the next occurrence of the search target.
- Select `Find previous` to move to the previous occurrence of the search target.<br/>
- Select `Persistent find` to identify all occurrences of the search target within the editor.<br/>
- Select `Replace` to locate and replace (with prompting) individual occurrences of the target.
- Select `Replace all` to locate and replace all occurrences of the target within the editor.
- Select `Jump` to navigate to the next occurrence of the search target.
| Cmd+F

Cmd+G

Cmd+Shift+G

Cmd+Shift+F

Alt+G
| +| `Copy` | Click the `Copy` icon to copy the content that is currently highlighted in the Data Output panel. when in View/Edit data mode. | Accesskey + C | +| `Paste` | Click the `Paste` icon to paste a previously row into a new row when in View/Edit data mode. | Accesskey + P | +| `Delete` | Click the `Delete` icon to mark the selected rows for delete when in View/Edit data mode. | Accesskey + D | +| `Edit` | Use options on the `Edit` menu to access text editing tools; the options operate on the text displayed in the SQL Editor panel when in Query Tool mode:
- Select `Indent Selection` to indent the currently selected text.
- Select `Unindent Selection` to remove indentation from the currently selected text.
- Select `Inline Comment Selection` to enclose any lines that contain the selection in SQL style comment notation.
- Select `Inline Uncomment Selection` to remove SQL style comment notation from the selected line.
- Select `Block Comment` to enclose all lines that contain the selection in C style comment notation. This option acts as a toggle.
| Tab

Shift+Tab

Cmd+/

Cmd+.

Shift+Cmd+/
| +| `Filter` | Click the `Filter` icon to set filtering and sorting criteria for the data when in View/Edit data mode. Click the down arrow to access other filtering and sorting options:
- Click `Sort/Filter` to open the sorting and filtering dialogue.
- Click `Filter by Selection` to show only the rows containing the values in the selected cells.
- Click `Exclude by Selection` to show only the rows that do not contain the values in the selected cells.
- Click `Remove Sort/Filter` to remove any previously selected sort or filtering options.
| Accesskey + F | +| Limit Selector | Select a value in the `Limit Selector` to limit the size of the dataset to a number of rows. | Accesskey + R | +| `Stop` | Click the `Stop` icon to cancel the execution of the currently running query. | Accesskey + Q | +| `Execute/Refresh` | Click the `Execute/Refresh` icon to either execute or refresh the query highlighted in the SQL editor panel. Click the down arrow to access other execution options:
- Add a check next to `Auto-Rollback` to instruct the server to automatically roll back a transaction if an error occurs during the transaction.
- Add a check next to `Auto-Commit` to instruct the server to automatically commit each transaction. Any changes made by the transaction will be visible to others, and durable in the event of a crash.
| F5 | +| `Explain` | - Click the `Explain` icon to view an explanation plan for the current query. The result of the

EXPLAIN is displayed graphically on the `Explain` tab of the output panel, and in text form on the `Data Output` tab.
| F7 | +| `Explain analyze` | Click the `Explain analyze` icon to invoke an EXPLAIN ANALYZE command on the current query.

Navigate through the `Explain Options` menu to select options for the EXPLAIN command:
- Select `Verbose` to display additional information regarding the query plan.
- Select `Costs` to include information on the estimated startup and total cost of each plan node, as well as the estimated number of rows and the estimated width of each row.
- Select `Buffers` to include information on buffer usage.
- Select `Timing` to include information about the startup time and the amount of time spent in each node of the query.
| Shift+F7 | +| `Commit` | Click the `Commit` icon to commit the transaction. | Shift+CTRL+M | +| `Rollback` | Click the `Rollback` icon to rollback the transaction. | Shift+CTRL+R | +| `Clear` | Use options on the `Clear` drop-down menu to erase display contents:
- Select `Clear Query Window` to erase the content of the SQL Editor panel.
- Select `Clear History` to erase the content of the `History` tab.
| Accesskey + L | +| `Download as CSV` | Click the `Download as CSV` icon to download the result set of the current query to a comma-separated list. You can specify the CSV settings through `Preferences -> SQL Editor -> CSV output` dialogue. | F8 | +| `Macros` | Click the *Macros* icon to manage the macros. You can create, edit or clear the macros through *Manage Macros* option. | | **The SQL Editor Panel** @@ -88,7 +106,7 @@ Editable and read-only columns are identified using pencil and lock icons (respe The psycopg2 driver version should be equal to or above 2.8 for updatable query result sets to work. -An updatable result set is identical to the [Data Grid](../08_toc_pem_developer_tools/04_editgrid/#data-grid) in View/Edit Data mode, and can be modified in the same way. +An updatable result set is identical to the [Data Grid](04_editgrid/#data-grid) in View/Edit Data mode, and can be modified in the same way. If Auto-commit is off, the data changes are made as part of the ongoing transaction, if no transaction is ongoing a new one is initiated. The data changes are not committed to the database unless the transaction is committed. @@ -114,7 +132,8 @@ Please note that `EXPLAIN VERBOSE` cannot be displayed graphically. Click on a n Use the download button on top left corner of the `Explain` canvas to download the plan as an SVG file. -**Note:** Download as SVG is not supported on Internet Explorer. +!!! Note + Download as SVG is not supported on Internet Explorer. ![Query Tool - Explain tab - Graphical plan tab](../images/query_output_explain_details.png) @@ -126,7 +145,7 @@ Note that the query plan that accompanies the `Explain analyze` is available on background color of the exclusive, inclusive, and Rows X columns may vary based on the difference between actual vs planned. 
-If percentage of the exclusive/inclusive timings of the total query time is: > 90 - Red color > 50 - Orange (between red and yellow) color > 10 - Yellow color +If percentage of the exclusive/inclusive timings of the total query time is: > 90 - Red color > 50 - Orange (between red and yellow) color > 10 - Yellow color If planner mis-estimated number of rows (actual vs planned) by 10 times - Yellow color 100 times - Orange (between Red and Yellow) color 1000 times - Red color @@ -169,7 +188,7 @@ You can show or hide the queries generated internally by pgAdmin (during 'View/E To erase the content of the `Query History` tab, select `Clear history` from the `Clear` drop-down menu. -Query History is maintained across sessions for each database on a per-user basis when running in Query Tool mode. In View/Edit Data mode, history is not retained. By default, the last 20 queries are stored for each database. This can be adjusted in config\_local.py by overriding the MAX\_QUERY\_HIST\_STORED value. +Query History is maintained across sessions for each database on a per-user basis when running in Query Tool mode. In View/Edit Data mode, history is not retained. By default, the last 20 queries are stored for each database. This can be adjusted in config_local.py by overriding the MAX_QUERY_HIST_STORED value. ## Connection Status @@ -182,7 +201,7 @@ Change connection +\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\* User can connect to another server or database from existing open session of query tool. - Click on the connection link next to connection status. -- Now click on the *<New Connection>* option from the dropdown. +- Now click on the *<New Connection>* option from the dropdown. 
Query tool connection options diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/03_pem_interpreting_graphical_query.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/03_pem_interpreting_graphical_query.mdx index 4a9c86f1053..c89fad74c5d 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/03_pem_interpreting_graphical_query.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/03_pem_interpreting_graphical_query.mdx @@ -4,7 +4,6 @@ title: "Interpreting Graphical Query Plans" - The graphical explain plan provides clues that can help you identify the aspects of the selected query that consume the most resources; within the diagram, thicker lines indicate the portions of the query that are expected to take the most processing time. To view a graphical interpretation of an executed query, select `Explain` or `Explain Analyze` from the `Execute/Refresh` drop-down menu. Please note that you can use the `Explain Options` pull-aside menu to specify the level of detail displayed for each node. @@ -17,8 +16,8 @@ Hover over an icon within the plan to view details for the selected node: Each query operator (within the selected query) is represented in the graphical display by an icon. 
The table below describes the Advanced Server query operators: -| Icon | Represents | Description | -|----------------------------------------------------------------------|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Icon | Represents | Description | +| ----------------------------------------------------------------------- | --------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ![Result set icon](../images/result_set_icon.png) | Result Set | The Result Set icon represents a simple result set in the query plan. Typically, a Result Set operator is used to evaluate a query that does not fetch data from a table. | | ![Aggregat icon](../images/aggregate_icon.png) | Aggregate | The server creates an Aggregate operator whenever the query invokes an aggregate function (a function that returns a value computed from multiple rows). Aggregate functions include: AVG(), COUNT(), MAX(), MIN(), STDDEV(), SUM(), and VARIANCE(). | | ![Window aggregate icon](../images/window_aggregate_icon.png) | Window Aggregate | The server may use a Window aggregate operator to implement windowed aggregate functions; a windowed aggregate function is a function that returns a value computed from a set of rows within the input. 
| diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/01_viewdata_filter.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/01_viewdata_filter.mdx index 630ae563aa0..b20db8a7891 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/01_viewdata_filter.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/01_viewdata_filter.mdx @@ -4,8 +4,7 @@ title: "View/Edit Data Filter" - -You can access `Data Filter dialog` by clicking on `Filtered Rows` toolbar button visible on the Browser panel or by selecting *View/Edit Data -> Filtered Rows* context menu option. +You can access `Data Filter dialog` by clicking on `Filtered Rows` toolbar button visible on the Browser panel or by selecting *View/Edit Data -> Filtered Rows* context menu option. This allows you to specify an SQL Filter to limit the data displayed in the edit grid window: diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/index.mdx index a874a9ed1cd..8df77395c4a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/04_editgrid/index.mdx @@ -4,7 +4,6 @@ title: "Reviewing and Editing Data" - To review or modify data, right click on a table or view name in the `Browser` tree control. When the context menu opens, use the `View/Edit Data` menu to specify the number of rows you would like to display in the editor panel. ![Edit grid window](../../images/editgrid.png) @@ -24,11 +23,23 @@ The toolbar includes context-sensitive icons that provide shortcuts to frequentl Hover over an icon to display a tooltip that describes the icon's functionality. -
IconBehaviorShortcut
SaveUse the Save icon to save your changes to the currently displayed table contents.
FindUse options on the Find menu to access Search and Replace functionality or to Jump to another line.Ctrl/Cmd +F
CopyClick the Copy icon to copy the currently selected data.Ctrl+C
Paste RowClick the Paste Row icon to paste the content that is currently on the clipboard.
Delete RowUse the Delete Row icon to delete all the selected rows from the output panel.
Filter

Click the Filter icon to open a dialog that allows you to write and apply a filter for the content currently displayed in the output panel. Click the down arrow to open the Filter drop-down menu and select from pre-defined options:

Use options on the Filter menu to quick-sort or quick-filter the data set:

  • Filter: This option opens a dialog that allows you to define a filter. A filter is a condition that is supplied to an arbitrary WHERE clause that restricts the result set.
  • Remove Filter: This option removes all selection / exclusion filter conditions.
  • By Selection: This option refreshes the data set and displays only those rows whose column value matches the value in the cell currently selected.
  • Exclude Selection: This option refreshes the data set and excludes those rows whose column value matches the value in the cell currently selected.
No limitUse the No limit drop-down listbox to specify how many rows to display in the output panel. Select from: No limit (the default), 1000 rows, 500 rows, or 100 rows.
Execute/RefreshClick the Execute/Refresh icon to execute the SQL command that is displayed in the top panel. If you have not saved modifications to the content displayed in the data grid, you will be prompted to confirm the execution. To preserve your changes before refreshing the content, click the Save toolbar button before executing the refresh.F5
StopClick the Stop icon to cancel the execution of the currently running query.
Clear HistoryUse the Clear History drop-down menu to erase the contents of the History tab.
Download as CSVClick the Download as CSV icon to download the result set of the current query to a comma-separated list. You can control the CSV settings through Preferences -> SQL Editor -> CSV output dialogue.F8
+| Icon | Behavior | Shortcut | +| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| `Save` | Use the `Save` icon to save your changes to the currently displayed table contents. | | +| `Find` | Use options on the `Find` menu to access Search and Replace functionality or to Jump to another line. | Ctrl/Cmd +F | +| `Copy` | Click the `Copy` icon to copy the currently selected data. | Ctrl+C | +| `Paste Row` | Click the `Paste Row` icon to paste the content that is currently on the clipboard. | | +| `Delete Row` | Use the `Delete Row` icon to delete all the selected rows from the output panel. | | +| `Filter` | Click the `Filter` icon to open a dialog that allows you to write and apply a filter for the content currently displayed in the output panel. Click the down arrow to open the `Filter` drop-down menu and select from pre-defined options:


Use options on the `Filter` menu to quick-sort or quick-filter the data set:
- Filter: This option opens a dialog that allows you to define a filter. A filter is a condition that is supplied to an arbitrary WHERE clause that restricts the result set.
- Remove Filter: This option removes all selection / exclusion filter conditions.
- By Selection: This option refreshes the data set and displays only those rows whose column value matches the value in the cell currently selected.
- Exclude Selection: This option refreshes the data set and excludes those rows whose column value matches the value in the cell currently selected.
| | +| `No limit` | Use the `No limit` drop-down listbox to specify how many rows to display in the output panel. Select from: `No limit` (the default), `1000 rows`, `500 rows`, or `100 rows`. | | +| `Execute/Refresh` | Click the `Execute/Refresh` icon to execute the SQL command that is displayed in the top panel. If you have not saved modifications to the content displayed in the data grid, you will be prompted to confirm the execution. To preserve your changes before refreshing the content, click the `Save` toolbar button before executing the refresh. | F5 | +| `Stop` | Click the `Stop` icon to cancel the execution of the currently running query. | | +| `Clear History` | Use the `Clear History` drop-down menu to erase the contents of the `History` tab. | | +| `Download as CSV` | Click the `Download as CSV` icon to download the result set of the current query to a comma-separated list. You can control the CSV settings through `Preferences -> SQL Editor -> CSV output` dialogue. | F8 | **The Data Grid** -The top row of the data grid displays the name of each column, the data type, and if applicable, the number of characters allowed. A column that is part of the primary key will additionally be marked with \[PK\]. +The top row of the data grid displays the name of each column, the data type, and if applicable, the number of characters allowed. A column that is part of the primary key will additionally be marked with \[PK]. @@ -74,13 +85,13 @@ You can access `Sort/Filter options dialog` by clicking on Sort/Filter button. T - Use `SQL Filter` to provide SQL filtering criteria. These will be added to the "WHERE" clause of the query used to retrieve the data. For example, you might enter: -``` sql +```sql id > 25 AND created > '2018-01-01' ``` - Use `Data Sorting` to sort the data in the output grid -To add new column(s) in data sorting grid, click on the \[+\] icon. +To add new column(s) in data sorting grid, click on the \[+] icon. 
- Use the drop-down `Column` to select the column you want to sort. - Use the drop-down `Order` to select the sort order for the column. @@ -93,6 +104,6 @@ To delete a row from the grid, click the trash icon.
-viewdata\_filter +viewdata_filter
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/05_schema_diff.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/05_schema_diff.mdx index 062addeeb45..07888d4d2b7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/05_schema_diff.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/05_schema_diff.mdx @@ -4,7 +4,6 @@ title: "Schema Diff" - **Schema Diff** is a feature that allows you to compare objects between two database or two schemas. Use the `Tools` menu to access Schema Diff. The Schema Diff feature allows you to: @@ -14,10 +13,9 @@ The Schema Diff feature allows you to: > - List the differences in SQL statement for target database objects. > - Generate synchronization scripts. -**Note:** - -> - The source and target database servers must be of the same major version. -> - If you compare two **schemas** then dependencies won't be resolved. +!!! Note + > - The source and target database servers must be of the same major version. + > - If you compare two **schemas** then dependencies won't be resolved. Click on *Schema Diff* under the *Tools* menu to open a selection panel. To compare **databases** choose the source and target servers, and databases. To compare **schemas** choose the source and target servers, databases, and schemas. After selecting the objects, click on the *Compare* button. @@ -25,7 +23,7 @@ You can open multiple copies of `Schema Diff` in individual tabs simultaneously. schema diff dialog -Use the [Preferences](../03_toc_pem_client/#preferences) dialog to specify following: +Use the [Preferences](../03_toc_pem_client/04_preferences/#preferences) dialog to specify following: > - *Schema Diff* should open in a new browser tab. Set *Open in new browser tab* option to true. > - *Schema Diff* should ignore the whitespaces while comparing string objects. Set *Ignore whitespaces* option to true. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/index.mdx index 38182336587..9e463dd7a79 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/08_toc_pem_developer_tools/index.mdx @@ -4,13 +4,12 @@ title: "Developer Tools" - The PEM client features powerful developer tools that you can use to execute and analyze complex SQL commands, manage data, and debug PL/SQL code. Contents:
-debugger query\_tool pem\_interpreting\_graphical\_query editgrid schema\_diff +debugger query_tool pem_interpreting_graphical_query editgrid schema_diff
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/01_pem_pgbouncer_server_agent_connection.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/01_pem_pgbouncer_server_agent_connection.mdx index 479f4d7e627..d647f1a0f77 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/01_pem_pgbouncer_server_agent_connection.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/01_pem_pgbouncer_server_agent_connection.mdx @@ -4,8 +4,7 @@ title: "Connecting PEM to pgBouncer" - -Each PEM agent connects to the PEM database server using the SSL certificates for each individual user. For example, an agent with ID\#1 connects to the PEM database server using the agent1 user. +Each PEM agent connects to the PEM database server using the SSL certificates for each individual user. For example, an agent with ID#1 connects to the PEM database server using the agent1 user. ![PEM without pgbouncer](../images/pem_pgbouncer_without.png) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/02_pem_pgbouncer_preparing_dbserver.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/02_pem_pgbouncer_preparing_dbserver.mdx index 0e6c277b65a..1aa72de0703 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/02_pem_pgbouncer_preparing_dbserver.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/02_pem_pgbouncer_preparing_dbserver.mdx @@ -4,49 +4,62 @@ title: "Preparing the PEM Server for pgBouncer Connections" - You must configure the PEM database server to accept connections from pgBouncer; the following example demonstrates the steps required to prepare the PEM database server. 1. Create a dedicated user named pgbouncer on the PEM database server. 
For example: - pem=# CREATE USER pgbouncer PASSWORD 'ANY_PASSWORD' LOGIN; - CREATE ROLE + ``` + pem=# CREATE USER pgbouncer PASSWORD 'ANY_PASSWORD' LOGIN; + CREATE ROLE + ``` -2. Create a user named pem\_admin1 (a non-super user) with pem\_admin and pem\_agent\_pool role membership on the PEM database server. For example: +2. Create a user named pem_admin1 (a non-super user) with pem_admin and pem_agent_pool role membership on the PEM database server. For example: - pem=# CREATE USER pem_admin1 PASSWORD 'ANY_PASSWORD' LOGIN CREATEROLE; - CREATE ROLE - pem=# GRANT pem_admin, pem_agent_pool TO pem_admin1; - GRANT ROLE + ``` + pem=# CREATE USER pem_admin1 PASSWORD 'ANY_PASSWORD' LOGIN CREATEROLE; + CREATE ROLE + pem=# GRANT pem_admin, pem_agent_pool TO pem_admin1; + GRANT ROLE + ``` 3. Grant CONNECT privilege to the pgbouncer user on the pem database. For example: - pem=# GRANT CONNECT ON DATABASE pem TO pgbouncer ;GRANT USAGE ON SCHEMA pem TO pgbouncer; - GRANT + ``` + pem=# GRANT CONNECT ON DATABASE pem TO pgbouncer ;GRANT USAGE ON SCHEMA pem TO pgbouncer; + GRANT + ``` 4. Grant USAGE privilege to the pgbouncer user for the pem schema on the pem database. For example: - pem=# GRANT USAGE ON SCHEMA pem TO pgbouncer; - GRANT + ``` + pem=# GRANT USAGE ON SCHEMA pem TO pgbouncer; + GRANT + ``` -5. Grant EXECUTE privilege to the pgbouncer user on the pem.get\_agent\_pool\_auth(text) function in the pem database. For example: +5. Grant EXECUTE privilege to the pgbouncer user on the pem.get_agent_pool_auth(text) function in the pem database. For example: - pem=# GRANT EXECUTE ON FUNCTION pem.get_agent_pool_auth(text) TO pgbouncer; - GRANT + ``` + pem=# GRANT EXECUTE ON FUNCTION pem.get_agent_pool_auth(text) TO pgbouncer; + GRANT + ``` -6. Use the pem.create\_proxy\_agent\_user(varchar) function to create a user named pem\_agent\_user1 on the PEM database server. 
The function will create a user with the same name with a random password, and grant `pem_agent` and `pem_agent_pool` roles to the user. This allows pgBouncer to use a proxy user on behalf of the agent. For example: +6. Use the pem.create_proxy_agent_user(varchar) function to create a user named pem_agent_user1 on the PEM database server. The function will create a user with the same name with a random password, and grant `pem_agent` and `pem_agent_pool` roles to the user. This allows pgBouncer to use a proxy user on behalf of the agent. For example: - pem=# SELECT pem.create_proxy_agent_user('pem_agent_user1'); - create_proxy_agent_user - ------------------------- - (1 row) + ``` + pem=# SELECT pem.create_proxy_agent_user('pem_agent_user1'); + create_proxy_agent_user + ------------------------- + (1 row) + ``` 7. Add the following entries to the start of the `pg_hba.conf` file of the PEM database server; this will allow pgBouncer user to connect to the pem database using the md5 authentication method. For example: - # Allow the PEM agent proxy user (used by - # pgbouncer) to connect the to PEM server using - # md5 + ``` + # Allow the PEM agent proxy user (used by + # pgbouncer) to connect the to PEM server using + # md5 - local pem pgbouncer,pem_admin1 md5 + local pem pgbouncer,pem_admin1 md5 + ``` After configuring the PEM server, you should configure pgBouncer. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/03_pem_pgbouncer_configuring_pgbouncer.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/03_pem_pgbouncer_configuring_pgbouncer.mdx index 7633fd63acf..82368180471 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/03_pem_pgbouncer_configuring_pgbouncer.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/03_pem_pgbouncer_configuring_pgbouncer.mdx @@ -4,94 +4,105 @@ title: "Configuring pgBouncer" - -You must configure pgBouncer to work with the PEM database server. In our example, we will run pgBouncer as the enterprisedb system user. The following steps outline the process of configuring pgBouncer (version >= 1.9). +You must configure pgBouncer to work with the PEM database server. In our example, we will run pgBouncer as the enterprisedb system user. The following steps outline the process of configuring pgBouncer (version >= 1.9). 1. Open a terminal window and navigate into the pgBouncer directory. 2. Change the owner of the etc directory for pgBouncer (where pgbouncer.ini resides) to enterprisedb, and change the directory permissions to 0700. For example: - $ chown enterprisedb:enterprisedb /etc/edb/pgbouncer1.9 - $ chmod 0700 /etc/edb/pgbouncer1.9 + ``` + $ chown enterprisedb:enterprisedb /etc/edb/pgbouncer1.9 + $ chmod 0700 /etc/edb/pgbouncer1.9 + ``` 3. Change the contents of the pgbouncer.ini or edb-pgbouncer.ini file as follows: - [databases] - ;; Change the pool_size according to maximum connections allowed - ;; to the PEM database server as required. 
- ;; 'auth_user' will be used for authenticate the db user (proxy - ;; agent user in our case) - pem = port=5444 host=/tmp dbname=pem auth_user=pgbouncer pool_size=80 pool_mode=transaction - * = port=5444 host=/tmp dbname=pem auth_user=pgbouncer pool_size=10 - [pgbouncer] - logfile = /var/log/edb/pgbouncer1.9/edb-pgbouncer-1.9.log - pidfile = /var/run/edb/pgbouncer1.9/edb-pgbouncer-1.9.pid - listen_addr = * - ;; Agent needs to use this port to connect the pem database now - listen_port = 6432 - ;; Require to support for the SSL Certificate authentications - ;; for PEM Agents - client_tls_sslmode = require - ;; These are the root.crt, server.key, server.crt files present - ;; in the present under the data directory of the PEM database - ;; server, used by the PEM Agents for connections. - client_tls_ca_file = /var/lib/edb/as11/data/root.crt - client_tls_key_file = /var/lib/edb/as11/data/server.key - client_tls_cert_file = /var/lib/edb/as11/data/server.crt - ;; Use hba file for client connections - auth_type = hba - ;; Authentication file, Reference: - ;; https://pgbouncer.github.io/config.html#auth_file - auth_file = /etc/edb/pgbouncer1.9/userlist.txt - ;; HBA file - auth_hba_file = /etc/edb/pgbouncer1.9/hba_file - ;; Use pem.get_agent_pool_auth(TEXT) function to authenticate - ;; the db user (used as a proxy agent user). - auth_query = SELECT * FROM pem.get_agent_pool_auth($1) - ;; DB User for administration of the pgbouncer - admin_users = pem_admin1 - ;; DB User for collecting the statistics of pgbouncer - stats_users = pem_admin1 - server_reset_query = DISCARD ALL - ;; Change based on the number of agents installed/required - max_client_conn = 500 - ;; Close server connection if its not been used in this time. - ;; Allows to clean unnecessary connections from pool after peak. - server_idle_timeout = 60 + ``` + [databases] + ;; Change the pool_size according to maximum connections allowed + ;; to the PEM database server as required. 
+ ;; 'auth_user' will be used for authenticate the db user (proxy + ;; agent user in our case) + pem = port=5444 host=/tmp dbname=pem auth_user=pgbouncer pool_size=80 pool_mode=transaction + * = port=5444 host=/tmp dbname=pem auth_user=pgbouncer pool_size=10 + [pgbouncer] + logfile = /var/log/edb/pgbouncer1.9/edb-pgbouncer-1.9.log + pidfile = /var/run/edb/pgbouncer1.9/edb-pgbouncer-1.9.pid + listen_addr = * + ;; Agent needs to use this port to connect the pem database now + listen_port = 6432 + ;; Require to support for the SSL Certificate authentications + ;; for PEM Agents + client_tls_sslmode = require + ;; These are the root.crt, server.key, server.crt files present + ;; in the present under the data directory of the PEM database + ;; server, used by the PEM Agents for connections. + client_tls_ca_file = /var/lib/edb/as11/data/root.crt + client_tls_key_file = /var/lib/edb/as11/data/server.key + client_tls_cert_file = /var/lib/edb/as11/data/server.crt + ;; Use hba file for client connections + auth_type = hba + ;; Authentication file, Reference: + ;; https://pgbouncer.github.io/config.html#auth_file + auth_file = /etc/edb/pgbouncer1.9/userlist.txt + ;; HBA file + auth_hba_file = /etc/edb/pgbouncer1.9/hba_file + ;; Use pem.get_agent_pool_auth(TEXT) function to authenticate + ;; the db user (used as a proxy agent user). + auth_query = SELECT * FROM pem.get_agent_pool_auth($1) + ;; DB User for administration of the pgbouncer + admin_users = pem_admin1 + ;; DB User for collecting the statistics of pgbouncer + stats_users = pem_admin1 + server_reset_query = DISCARD ALL + ;; Change based on the number of agents installed/required + max_client_conn = 500 + ;; Close server connection if its not been used in this time. + ;; Allows to clean unnecessary connections from pool after peak. + server_idle_timeout = 60 + ``` 4. 
Use the following command to create and update the /etc/edb/pgbouncer1.9/userlist.txt authentication file for pgBouncer: - pem=# COPY ( - SELECT 'pgbouncer'::TEXT, 'pgbouncer_password' - UNION ALL - SELECT 'pem_admin1'::TEXT, 'pem_admin1_password' - ) TO '/etc/edb/pgbouncer1.9/userlist.txt' - WITH (FORMAT CSV, DELIMITER ' ', FORCE_QUOTE *); - COPY 2 - -> NOTE: A super user cannot invoke the PEM authentication query function pem.get\_proxy\_auth(text). If the pem\_admin user is a super user, you must add the password to the authentication file, which is enterprisedb in the above example. - -1. Create an HBA file (/etc/edb/pgbouncer1.9/hba\_file) for pgBouncer that contains the following content: - - # Use authentication method md5 for the local connections to - # connect pem database & pgbouncer (virtual) database. - local pgbouncer all md5 - - # Use authentication method md5 for the remote connections to - # connect to pgbouncer (virtual database) using enterprisedb - # user. - host pgbouncer,pem pem_admin1 0.0.0.0/0 md5 - # Use authentication method cert for the TCP/IP connections to - # connect the pem database using pem_agent_user1 - hostssl pem pem_agent_user1 0.0.0.0/0 cert - -2. Change the owner of the HBA file (/etc/edb/pgbouncer1.9/hba\_file) to enterprisedb, and change the directory permissions to 0600. For example: - - $ chown enterprisedb:enterprisedb /etc/edb/pgbouncer1.9/hba_file - $ chmod 0600 /etc/edb/pgbouncer1.9/hba_file + ``` + pem=# COPY ( + SELECT 'pgbouncer'::TEXT, 'pgbouncer_password' + UNION ALL + SELECT 'pem_admin1'::TEXT, 'pem_admin1_password' + ) TO '/etc/edb/pgbouncer1.9/userlist.txt' + WITH (FORMAT CSV, DELIMITER ' ', FORCE_QUOTE *); + COPY 2 + ``` + +> NOTE: A super user cannot invoke the PEM authentication query function pem.get_proxy_auth(text). If the pem_admin user is a super user, you must add the password to the authentication file, which is enterprisedb in the above example. + +1. 
Create an HBA file (/etc/edb/pgbouncer1.9/hba_file) for pgBouncer that contains the following content: + + ``` + # Use authentication method md5 for the local connections to + # connect pem database & pgbouncer (virtual) database. + local pgbouncer all md5 + + # Use authentication method md5 for the remote connections to + # connect to pgbouncer (virtual database) using enterprisedb + # user. + host pgbouncer,pem pem_admin1 0.0.0.0/0 md5 + # Use authentication method cert for the TCP/IP connections to + # connect the pem database using pem_agent_user1 + hostssl pem pem_agent_user1 0.0.0.0/0 cert + ``` + +2. Change the owner of the HBA file (/etc/edb/pgbouncer1.9/hba_file) to enterprisedb, and change the directory permissions to 0600. For example: + + ``` + $ chown enterprisedb:enterprisedb /etc/edb/pgbouncer1.9/hba_file + $ chmod 0600 /etc/edb/pgbouncer1.9/hba_file + ``` 3. Enable the pgBouncer service, and start the service. For example: - $ systemctl enable edb-pgbouncer-1.9 - Created symlink from /etc/systemd/system/multi-user.target.wants/edb-pgbouncer-1.9.service to /usr/lib/systemd/system/edb-pgbouncer-1.9.service. - $ systemctl start edb-pgbouncer-1.9 + ``` + $ systemctl enable edb-pgbouncer-1.9 + Created symlink from /etc/systemd/system/multi-user.target.wants/edb-pgbouncer-1.9.service to /usr/lib/systemd/system/edb-pgbouncer-1.9.service. 
+ $ systemctl start edb-pgbouncer-1.9 + ``` diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/04_pem_pgbouncer_configuring_pem_agent.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/04_pem_pgbouncer_configuring_pem_agent.mdx index e684c3bdf88..ff04b3815a9 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/04_pem_pgbouncer_configuring_pem_agent.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/04_pem_pgbouncer_configuring_pem_agent.mdx @@ -4,7 +4,6 @@ title: "Configuring the PEM Agent to use pgBouncer" - You can use an RPM package to install a PEM Agent; for detailed installation information, please see the PEM Installation Guide, available from the [EnterpriseDB website](https://www.enterprisedb.com/resources/product-documentation) Please note that PEM Agent which is responsible for sending SNMP notifications should not be configured with pgBouncer. For Example - If default PEM Agent installed along with PEM Server is used for SNMP notifications, then it should not be configured with pgBouncer. @@ -13,63 +12,77 @@ Please note that PEM Agent which is responsible for sending SNMP notifications s After using an RPM package to install the PEM agent, you will need to configure it to work it against a particular PEM database server. Use the following command: - $ PGSSLMODE=require PEM_SERVER_PASSWORD=pem_admin1_password /usr/edb/pem/agent/bin/pemworker --register-agent --pem-server 172.16.254.22 --pem-port 6432 --pem-user pem_admin1 --pem-agent-user pem_agent_user1 --display-name *Agent_Name* - Postgres Enterprise Manager Agent registered successfully! 
+``` +$ PGSSLMODE=require PEM_SERVER_PASSWORD=pem_admin1_password /usr/edb/pem/agent/bin/pemworker --register-agent --pem-server 172.16.254.22 --pem-port 6432 --pem-user pem_admin1 --pem-agent-user pem_agent_user1 --display-name *Agent_Name* +Postgres Enterprise Manager Agent registered successfully! +``` -In above command, the command line argument --pem-agent-user instructs the agent to create an SSL certificate and key pair for the pem\_agent\_user1 database user in /root/.pem directory. For example: +In the above command, the command-line argument --pem-agent-user instructs the agent to create an SSL certificate and key pair for the pem_agent_user1 database user in the /root/.pem directory. For example: - /root/.pem/pem_agent_user1.crt - /root/.pem/pem_agent_user1.key +``` +/root/.pem/pem_agent_user1.crt +/root/.pem/pem_agent_user1.key +``` -They will be used by the PEM agent to connect to the PEM database server as pem\_agent\_user1. It will also create /usr/edb/pem/agent/etc/agent.cfg. +They will be used by the PEM agent to connect to the PEM database server as pem_agent_user1. It will also create /usr/edb/pem/agent/etc/agent.cfg. You will find a line mentioning the agent-user to be used in the agent.cfg configuration file. 
For example: - $ cat /usr/edb/pem/agent/etc/agent.cfg - [PEM/agent] - pem_host=172.16.254.22 - pem_port=6432 - agent_id=12 - agent_user=pem_agent_user1 - agent_ssl_key=/root/.pem/pem_agent_user1.key - agent_ssl_crt=/root/.pem/pem_agent_user1.crt - log_level=warning - log_location=/var/log/pem/worker.log - agent_log_location=/var/log/pem/agent.log - long_wait=30 - short_wait=10 - alert_threads=0 - enable_smtp=false - enable_snmp=false - enable_webhook=false - max_webhook_retries=3 - allow_server_restart=true - max_connections=0 - connect_timeout=-1 - connection_lifetime=0 - allow_batch_probes=false - heartbeat_connection=false +``` +$ cat /usr/edb/pem/agent/etc/agent.cfg +[PEM/agent] +pem_host=172.16.254.22 +pem_port=6432 +agent_id=12 +agent_user=pem_agent_user1 +agent_ssl_key=/root/.pem/pem_agent_user1.key +agent_ssl_crt=/root/.pem/pem_agent_user1.crt +log_level=warning +log_location=/var/log/pem/worker.log +agent_log_location=/var/log/pem/agent.log +long_wait=30 +short_wait=10 +alert_threads=0 +enable_smtp=false +enable_snmp=false +enable_webhook=false +max_webhook_retries=3 +allow_server_restart=true +max_connections=0 +connect_timeout=-1 +connection_lifetime=0 +allow_batch_probes=false +heartbeat_connection=false +``` ## Configuring an Existing PEM Agent (installed using an RPM) If you are using an existing PEM agent, you can copy the SSL certificate and key files to the target machine, and reuse the files. You will need to modify the files, adding a new parameter and replacing some parameters in the existing `agent.cfg` file. -Add a line for agent\_user to be used for the agent. For example: +Add a line for agent_user to be used for the agent. For example: - agent_user=pem_agent_user1 +``` +agent_user=pem_agent_user1 +``` Update the port to specify the pgBouncer port. For example: - pem_port=6432 +``` +pem_port=6432 +``` Update the certificate and key path locations. 
For example: - agent_ssl_key=/root/.pem/pem_agent_user1.key - agent_ssl_crt=/root/.pem/pem_agent_user1.crt +``` +agent_ssl_key=/root/.pem/pem_agent_user1.key +agent_ssl_crt=/root/.pem/pem_agent_user1.crt +``` Please note: as an alternative, you can run the agent self registration, but that will create a new agent id. If you do run the agent self-registration, you must replace the new agent id with existing id, and disable the entry for the new agent id in the pem.agent table. For example: - pem=# UPDATE pem.agent SET active = false WHERE id = *new_agent_id*; - UPDATE 1 +``` +pem=# UPDATE pem.agent SET active = false WHERE id = *new_agent_id*; +UPDATE 1 +``` Please keep a backup of the existing SSL certificate, key file, and agent configuration file. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/index.mdx index fea89226d65..270c06d0562 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/09_toc_pem_configure_pgbouncer/index.mdx @@ -4,13 +4,12 @@ title: "Configuring pgBouncer for use with PEM Agents" - pgBouncer is a lightweight connection pooler for Postgres. You can use pgBouncer to limit the number of connections from the PEM Agent towards the Postgres Enterprise Manager (PEM) server on a non-Windows machine. Contents:
-pem\_pgbouncer\_server\_agent\_connection pem\_pgbouncer\_preparing\_dbserver pem\_pgbouncer\_configuring\_pgbouncer pem\_pgbouncer\_configuring\_pem\_agent +pem_pgbouncer_server_agent_connection pem_pgbouncer_preparing_dbserver pem_pgbouncer_configuring_pgbouncer pem_pgbouncer_configuring_pem_agent
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/01_using_pgagent.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/01_using_pgagent.mdx index e9d17c943fd..9fb69ad8a21 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/01_using_pgagent.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/01_using_pgagent.mdx @@ -4,7 +4,6 @@ title: "Using pgAgent" - pgAgent is a scheduling agent that runs and manages jobs; each job consists of one or more steps and schedules. If two or more jobs are scheduled to execute concurrently, pgAgent will execute the jobs in parallel (each with its own thread). A step may be a series of SQL statements or an operating system batch/shell script. Each step in a given job is executed when the previous step completes, in alphanumeric order by name. Switches on the `pgAgent Job` dialog (accessed through the `Properties` context menu) allow you to modify a job, enabling or disabling individual steps as needed. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/02_pgagent_install.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/02_pgagent_install.mdx index 12b373e77a9..89f85b3278e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/02_pgagent_install.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/02_pgagent_install.mdx @@ -4,20 +4,23 @@ title: "Installing pgAgent" - pgAgent runs as a daemon on Unix systems, and a service on Windows systems. In most cases it will run on the database server itself - for this reason, pgAgent is not automatically configured when PEM is installed. In some cases however, it may be preferable to run pgAgent on multiple systems, against the same database; individual jobs may be targeted at a particular host, or left for execution by any host. Locking prevents execution of the same instance of a job by multiple hosts. 
## Database setup Before using PEM to manage pgAgent, you must create the pgAgent extension in the maintenance database registered with PEM. To install pgAgent on a PostgreSQL host, connect to the `postgres` database, and navigate through the `Tools` menu to open the Query tool. For server versions 9.1 or later, and pgAgent 3.4.0 or later, enter the following command in the query window, and click the `Execute` icon: - CREATE EXTENSION pgagent; +``` +CREATE EXTENSION pgagent; +``` This command will create a number of tables and other objects in a schema called 'pgagent'. The database must also have the pl/pgsql procedural language installed - use the PostgreSQL `CREATE LANGUAGE` command to install pl/pgsql if necessary. To install pl/pgsql, enter the following command in the query window, and click the `Execute` icon: - CREATE LANGUAGE plpgsql; +``` +CREATE LANGUAGE plpgsql; +``` ## Daemon installation on Unix @@ -37,20 +40,24 @@ To install the pgAgent daemon on a Unix system, you will normally need to have r The program itself takes few command line options, most of which are only needed for debugging or specialised configurations: - Usage: - /path/to/pgagent [options] +``` +Usage: + /path/to/pgagent [options] - options: - -f run in the foreground (do not detach from the terminal) - -t - -r =10, default 30)> - -s - -l +options: + -f run in the foreground (do not detach from the terminal) + -t + -r =10, default 30)> + -s + -l +``` The connection string is a standard PostgreSQL libpq connection string (see the [PostgreSQL documentation on the connection string](http://www.postgresql.org/docs/current/static/libpq.html#libpq-connect) for further details). 
For example, the following command line will run pgAgent against a server listening on the localhost, using a database called 'postgres', connecting as the user 'postgres': - /path/to/pgagent hostaddr=127.0.0.1 dbname=postgres user=postgres +``` +/path/to/pgagent hostaddr=127.0.0.1 dbname=postgres user=postgres +``` ## Service installation on Windows @@ -68,21 +75,25 @@ pgAgent is available in a pre-built installer if you use [EnterpriseDB's Postgre pgAgent can install itself as a service on Windows systems. The command line options available are similar to those on Unix systems, but include an additional parameter to tell the service what to do: - Usage: - pgAgent REMOVE - pgAgent INSTALL [options] - pgAgent DEBUG [options] - - options: - -u - -p - -d - -t - -r =10, default 30)> - -l +``` +Usage: + pgAgent REMOVE + pgAgent INSTALL [options] + pgAgent DEBUG [options] + + options: + -u + -p + -d + -t + -r =10, default 30)> + -l +``` The service may be quite simply installed from the command line as follows (adjust the path as required): - "C:\Program Files\pgAgent\bin\pgAgent" INSTALL pgAgent -u postgres -p secret hostaddr=127.0.0.1 dbname=postgres user=postgres +``` +"C:\Program Files\pgAgent\bin\pgAgent" INSTALL pgAgent -u postgres -p secret hostaddr=127.0.0.1 dbname=postgres user=postgres +``` You can then start the service at the command line using `net start pgAgent`, or from the `Services` control panel applet. Any logging output or errors will be reported in the Application event log. The DEBUG mode may be used to run pgAgent from a command prompt. When run this way, log messages will output to the command window. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/03_pgagent_jobs.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/03_pgagent_jobs.mdx index c66c8c8fd30..ad4a501a1e4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/03_pgagent_jobs.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/03_pgagent_jobs.mdx @@ -4,7 +4,6 @@ title: "Creating a pgAgent Job" - pgAgent is a scheduling agent that runs and manages jobs; each job consists of steps and schedules. To create or manage a job, use the Browser tree control to browse to the server on which the pgAgent database objects were created. The tree control will display a `pgAgent Jobs` node, under which currently defined jobs are displayed. To add a new job, right click on the `pgAgent Jobs` node, and select `Create pgAgent Job...` from the context menu. @@ -23,9 +22,12 @@ Use the fields on the `General` tab to provide general information about a job: > > - Use the `Host Agent` field to specify the name of a machine that is running pgAgent to indicate that only that machine may execute the job. Leave the field blank to specify that any machine may perform the job. > -> **Note:** It is not always obvious what value to specify for the Host Agent in order to target a job step to a specific machine. With pgAgent running on the required machines and connected to the scheduler database, you can use the following query to view the hostnames as reported by each agent: +> !!! Note +> It is not always obvious what value to specify for the Host Agent in order to target a job step to a specific machine. With pgAgent running on the required machines and connected to the scheduler database, you can use the following query to view the hostnames as reported by each agent: > -> SELECT jagstation FROM pgagent.pga_jobagent +> ``` +> SELECT jagstation FROM pgagent.pga_jobagent +> ``` > > Use the hostname exactly as reported by the query in the Host Agent field. 
> diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/04_pgagent-steps.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/04_pgagent-steps.mdx index 866b15a08b6..94c36fea1fe 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/04_pgagent-steps.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/04_pgagent-steps.mdx @@ -4,7 +4,6 @@ title: "pgAgent Steps" - Each Job consists of a number of steps, each of which may be an SQL script, or an operating system batch/shell script. Each step in a given job is run in turn, in alphanumeric name order. Steps may be added to a job through the job properties dialogue, or added as a sub-object. The `Properties` tab of the main PEM client window will display details of the selected step, and the `Statistics` tab will display details of each run of the step, including and output or errors from the script. @@ -15,4 +14,4 @@ Each step consists of the details shown on the screenshot below, most of which a ![pgAgent Job - Steps definition details](../images/pgagent_stepdetails.png) -The `Definition` tab contains a single text box into which the step script should be entered. For SQL steps, this should be a series of one or more SQL statements. For batch jobs, when running on a Windows server, standard batch file syntax must be used, and when running on a *nix server, any shell script may be used, provided that a suitable interpreter is specified on the first line (e.g.*\#!/bin/sh\*). +The `Definition` tab contains a single text box into which the step script should be entered. For SQL steps, this should be a series of one or more SQL statements. For batch jobs, when running on a Windows server, standard batch file syntax must be used, and when running on a *nix server, any shell script may be used, provided that a suitable interpreter is specified on the first line (e.g.*#!/bin/sh\*). 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/05_pgagent-schedules.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/05_pgagent-schedules.mdx index f27154c852e..81b7e946b21 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/05_pgagent-schedules.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/05_pgagent-schedules.mdx @@ -4,7 +4,6 @@ title: "pgAgent Schedules" - Each Job is executed according to one or more schedules. Each time the job or any of its schedules are altered, the next runtime of the job is re-calculated. Each instance of pgAgent periodically polls the database for jobs with the next runtime value in the past. By polling at least once every minute, all jobs will normally start within one minute of the specified start time. If no pgAgent instance is running at the next runtime of a job, it will run as soon as pgAgent is next started, following which it will return to the normal schedule. Schedules may be added to a job through the job properties dialogue, or added as a sub-object. The `Properties` tab of the main PEM client window will display details of the selected schedule. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/index.mdx index 3ee119a8718..1fc62f8790e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/10_pgagent/index.mdx @@ -4,7 +4,6 @@ title: "pgAgent" - pgAgent is a job scheduling agent for Postgres databases, capable of running multi-step batch or shell scripts and SQL tasks on complex schedules. pgAgent is distributed independently. You can download pgAgent from the [download area](http://www.pgadmin.org/download) of the pgAdmin website. @@ -13,6 +12,6 @@ Contents:
-using\_pgagent pgagent\_install pgagent\_jobs pgagent-steps pgagent-schedules +using_pgagent pgagent_install pgagent_jobs pgagent-steps pgagent-schedules
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/01_licence.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/01_licence.mdx index 76b93db72f1..9006c4912fb 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/01_licence.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/01_licence.mdx @@ -4,5 +4,4 @@ title: "Licence" - pgAdmin is released under the [PostgreSQL Licence](http://www.postgresql.org/about/licence), which is a liberal Open Source licence similar to BSD or MIT, and approved by the Open Source Initiative. The copyright for the project source code, website and documentation is attributed to the [pgAdmin Development Team](https://www.pgadmin.org/development/team.php). diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/02_kerberos.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/02_kerberos.mdx index 82bd81cc64c..dfb82df61ed 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/02_kerberos.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/02_kerberos.mdx @@ -4,7 +4,6 @@ title: "The MIT Kerberos Licence" - PostgreSQL Enterprise Manager uses PostgreSQL's libpq library which may be linked with MIT Kerberos Libraries on some distributions. The MIT Kerberos licence is included below: ## Kerberos Copyright diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/03_openssl.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/03_openssl.mdx index 87680f07fff..ab8c41655ce 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/03_openssl.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/03_openssl.mdx @@ -4,7 +4,6 @@ title: "The OpenSSL Licence" - Postgres Enterprise Manager uses code from the OpenSSL project to provide support for SSL encrypted connections. The OpenSSL licence is included below: **Copyright (c) 1998-2011 The OpenSSL Project. 
All rights reserved.** @@ -14,7 +13,7 @@ Redistribution and use in source and binary forms, with or without modification, - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - All advertising materials mentioning features or use of this software must display the following acknowledgment: "This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit. ()" -- The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to endorse or promote products derived from this software without prior written permission. For written permission, please contact . +- The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to endorse or promote products derived from this software without prior written permission. For written permission, please contact [openssl-core@openssl.org](mailto:openssl-core@openssl.org). - Products derived from this software may not be called "OpenSSL" nor may "OpenSSL" appear in their names without prior written permission of the OpenSSL Project. - Redistributions of any form whatsoever must retain the following acknowledgment: "This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit ()" diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/04_snmp++.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/04_snmp++.mdx index 68a53f4d83c..d4c62142a65 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/04_snmp++.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/04_snmp++.mdx @@ -4,7 +4,6 @@ title: "The SNMP++ Licence" - Postgres Enterprise Manager uses code from the SNMP++ project to send snmp v1/v2 notifications. 
The SNMP++ licence is included below: **Copyright (c) 2001-2010 Jochen Katz, Frank Fock** diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/05_jquery_table_sort.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/05_jquery_table_sort.mdx index b9cdb60981a..d46b7908b37 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/05_jquery_table_sort.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/05_jquery_table_sort.mdx @@ -4,7 +4,6 @@ title: "The jquery table sort Licence" +TABLESORT.JS Copyright, Andy Croxall ([mitya@mitya.co.uk](mailto:mitya@mitya.co.uk)) For documentation and demo see -TABLESORT.JS Copyright, Andy Croxall () For documentation and demo see - -USAGE This script may be used, distributed and modified freely but this header must remain in tact. For usage info and demo, including info on args and params, see www.mitya.co.uk/scripts +USAGE This script may be used, distributed and modified freely but this header must remain in tact. For usage info and demo, including info on args and params, see [www.mitya.co.uk/scripts](http://www.mitya.co.uk/scripts) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/index.mdx index 58f14071927..7e3f8b511f0 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/11_appendices/index.mdx @@ -4,19 +4,18 @@ title: "Appendices" - Contents:
-licence kerberos openssl snmp++ jquery\_table\_sort +licence kerberos openssl snmp++ jquery_table_sort
**Additional Licences** | Component Name | Licence Name | Licence Url | -|-----------------------------------|------------------------|-------------------------------------------------------------------------------| +| --------------------------------- | ---------------------- | ----------------------------------------------------------------------------- | | wxWidgets | wxWidgets Licence | | | Apache HTTPD | Apache Licence | | | PHP | PHP Licence | | diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/01_pem_release_notes_8_0_1.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/01_pem_release_notes_8_0_1.mdx index b897ec69802..5e555cd08c7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/01_pem_release_notes_8_0_1.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/01_pem_release_notes_8_0_1.mdx @@ -2,65 +2,63 @@ title: "PEM v8.0.1" --- - - Release date: 2021-03-03 ## Features -[Issue \#5091](https://redmine.postgresql.org/issues/5091) - Make Statistics, Dependencies, Dependants tabs closable and the user can add them back using the 'Add panel' option. +[Issue #5091](https://redmine.postgresql.org/issues/5091) - Make Statistics, Dependencies, Dependants tabs closable and the user can add them back using the 'Add panel' option. ## Housekeeping -[Issue \#5338](https://redmine.postgresql.org/issues/5338) - Improve code coverage and API test cases for pgAgent. -[Issue \#5343](https://redmine.postgresql.org/issues/5343) - Improve code coverage and API test cases for Debugger. -[Issue \#6079](https://redmine.postgresql.org/issues/6079) - Updated mimetype from 'text/javascript' to 'application/javascript' as 'text/javascript' is obsolete. +[Issue #5338](https://redmine.postgresql.org/issues/5338) - Improve code coverage and API test cases for pgAgent. +[Issue #5343](https://redmine.postgresql.org/issues/5343) - Improve code coverage and API test cases for Debugger. 
+[Issue #6079](https://redmine.postgresql.org/issues/6079) - Updated mimetype from 'text/javascript' to 'application/javascript' as 'text/javascript' is obsolete. ## Bug fixes -PEM-3549 - Fixed the issue where latest BART backups were not displaying on BART dashboard refresh. \[Support ticket \# 1046037\] -PEM-3577 - Allow superuser name containing "-" provided during configuring configure-pem-server.sh script. \[Support ticket \# 1055435\] -PEM-3881 - Close the operating system resources properly during batch probe and command execution to avoid 'too many open files' error. \[Support ticket \# 1048713\] -PEM-3845 - Bundle pgaevent dll in agent windows installer to suppress event message log error. \[Support ticket \# 1106021\] -PEM-3901 - Fixed the unicode string handling support in pemAgent. \[Support ticket \# 1153153\] -PEM-3903 - Fixed the issue where BART restore was failing when agent was not bound with BART server. \[Support ticket \# 1129454\] -[Issue \#4892](https://redmine.postgresql.org/issues/4892) - Fixed an issue where pressing the back button will show another instance of the main page inside of the Query Tool tab. -[Issue \#5282](https://redmine.postgresql.org/issues/5282) - Added 'Count Rows' option to the partition sub tables. -[Issue \#5488](https://redmine.postgresql.org/issues/5488) - Improve the explain plan details by showing popup instead of tooltip on clicking of the specified node. -[Issue \#5571](https://redmine.postgresql.org/issues/5571) - Added support for expression in exclusion constraints. -[Issue \#5809](https://redmine.postgresql.org/issues/5809) - Fixed an issue where the focus is not properly set on the filter text editor after closing the error dialog. -[Issue \#5871](https://redmine.postgresql.org/issues/5871) - Ensure that username should be visible in the 'Connect to Server' popup when service and user name both specified. 
-[Issue \#5875](https://redmine.postgresql.org/issues/5875) - Ensure that the 'template1' database should not be visible after pg\_upgrade. -[Issue \#5886](https://redmine.postgresql.org/issues/5886) - Fixed false error is shown while adding a new foreign key from the table dialog when a foreign key already exists with Auto FK Index set to true. -[Issue \#5905](https://redmine.postgresql.org/issues/5905) - Fixed an issue where the Save button is enabled by default in Macro. -[Issue \#5906](https://redmine.postgresql.org/issues/5906) - Remove extra line after Manage Macros menu while clearing all macros. -[Issue \#5907](https://redmine.postgresql.org/issues/5907) - Ensure that 'Clear All Rows' should not work if there is no existing macro available and the user does not specify any value. -[Issue \#5929](https://redmine.postgresql.org/issues/5929) - Fixed an issue where the server is disconnected error message displayed if the user creates Macro with invalid SQL. -[Issue \#5965](https://redmine.postgresql.org/issues/5965) - Ensure that the macro query result should be download properly. -[Issue \#5973](https://redmine.postgresql.org/issues/5973) - Added appropriate help message and a placeholder for letting users know about the account password expiry for Login/Group Role. -[Issue \#5991](https://redmine.postgresql.org/issues/5991) - Ensure that dirty indicator (\*) should not be visible when renaming the tabs. -[Issue \#5992](https://redmine.postgresql.org/issues/5992) - Fixed an issue where escape character is shown when the server/database name has some special characters. -[Issue \#5997](https://redmine.postgresql.org/issues/5997) - Updated Flask-BabelEx to the latest. -[Issue \#5998](https://redmine.postgresql.org/issues/5998) - Fixed an issue where schema diff doesn't show the result of compare if source schema has tables with RLS. 
-[Issue \#6003](https://redmine.postgresql.org/issues/6003) - Fixed an issue where an illegal argument is showing for trigger SQL when a trigger is created for View. -[Issue \#6045](https://redmine.postgresql.org/issues/6045) - Fixed autocomplete issue where it is not showing any suggestions if the schema name contains escape characters. -[Issue \#6046](https://redmine.postgresql.org/issues/6046) - Fixed an issue where the state of the Save File icon does not match the dirty editor indicator. -[Issue \#6047](https://redmine.postgresql.org/issues/6047) - Fixed an issue where the dirty indicator stays active even if all changes were undone. -[Issue \#6058](https://redmine.postgresql.org/issues/6058) - Ensure that the rename panel should be disabled when the SQL file opened in the query tool. -[Issue \#6061](https://redmine.postgresql.org/issues/6061) - Fixed extra parentheses issue around joins for Views. -[Issue \#6065](https://redmine.postgresql.org/issues/6065) - Fixed accessibility issues in schema diff module. -[Issue \#6069](https://redmine.postgresql.org/issues/6069) - Fixed an issue on refreshing files in Query Tool. -[Issue \#6077](https://redmine.postgresql.org/issues/6077) - Fixed accessibility issues in various dialogs. -[Issue \#6084](https://redmine.postgresql.org/issues/6084) - Fixed TypeError exception in schema diff when selected any identical object. -[Issue \#6087](https://redmine.postgresql.org/issues/6087) - Fixed an issue where the dependencies tab showing multiple owners for the objects having shared dependencies. -[Issue \#6098](https://redmine.postgresql.org/issues/6098) - Fixed an issue of deleting records when the user tries to delete multiple records. -[Issue \#6120](https://redmine.postgresql.org/issues/6120) - Ensure that the user should be able to specify an older date for the account expiration of the role/user. 
-[Issue \#6121](https://redmine.postgresql.org/issues/6121) - Fixed an issue where the database list in the new connection window is not visible. -[Issue \#6122](https://redmine.postgresql.org/issues/6122) - Added informative message when there is no difference found for schema diff. -[Issue \#6128](https://redmine.postgresql.org/issues/6128) - Fixed an issue where sequences are not created. -[Issue \#6140](https://redmine.postgresql.org/issues/6140) - Ensure that verbose logs should be visible for Utility(Backup, Maintenance) jobs. -[Issue \#6144](https://redmine.postgresql.org/issues/6144) - Ensure that the current value of the sequence should be ignored while comparing using schema diff. -[Issue \#6157](https://redmine.postgresql.org/issues/6157) - Fixed an issue where strike-through is not visible for rows selected for deletion after scrolling. -[Issue \#6178](https://redmine.postgresql.org/issues/6178) - Fixed an issue where the user unable to change the background color for a server. -[Issue \#6187](https://redmine.postgresql.org/issues/6187) - Limit the upgrade check to run once per day. -[Issue \#6208](https://redmine.postgresql.org/issues/6208) - Fixed an issue where utility(Backup, Maintenance, ...) jobs are failing when the log level is set to DEBUG. +PEM-3549 - Fixed the issue where latest BART backups were not displaying on BART dashboard refresh. \[Support ticket # 1046037] +PEM-3577 - Allow superuser name containing "-" provided during configuring configure-pem-server.sh script. \[Support ticket # 1055435] +PEM-3881 - Close the operating system resources properly during batch probe and command execution to avoid 'too many open files' error. \[Support ticket # 1048713] +PEM-3845 - Bundle pgaevent dll in agent windows installer to suppress event message log error. \[Support ticket # 1106021] +PEM-3901 - Fixed the unicode string handling support in pemAgent. 
\[Support ticket # 1153153] +PEM-3903 - Fixed the issue where BART restore was failing when agent was not bound with BART server. \[Support ticket # 1129454] +[Issue #4892](https://redmine.postgresql.org/issues/4892) - Fixed an issue where pressing the back button will show another instance of the main page inside of the Query Tool tab. +[Issue #5282](https://redmine.postgresql.org/issues/5282) - Added 'Count Rows' option to the partition sub tables. +[Issue #5488](https://redmine.postgresql.org/issues/5488) - Improve the explain plan details by showing popup instead of tooltip on clicking of the specified node. +[Issue #5571](https://redmine.postgresql.org/issues/5571) - Added support for expression in exclusion constraints. +[Issue #5809](https://redmine.postgresql.org/issues/5809) - Fixed an issue where the focus is not properly set on the filter text editor after closing the error dialog. +[Issue #5871](https://redmine.postgresql.org/issues/5871) - Ensure that username should be visible in the 'Connect to Server' popup when service and user name both specified. +[Issue #5875](https://redmine.postgresql.org/issues/5875) - Ensure that the 'template1' database should not be visible after pg_upgrade. +[Issue #5886](https://redmine.postgresql.org/issues/5886) - Fixed false error is shown while adding a new foreign key from the table dialog when a foreign key already exists with Auto FK Index set to true. +[Issue #5905](https://redmine.postgresql.org/issues/5905) - Fixed an issue where the Save button is enabled by default in Macro. +[Issue #5906](https://redmine.postgresql.org/issues/5906) - Remove extra line after Manage Macros menu while clearing all macros. +[Issue #5907](https://redmine.postgresql.org/issues/5907) - Ensure that 'Clear All Rows' should not work if there is no existing macro available and the user does not specify any value. 
+[Issue #5929](https://redmine.postgresql.org/issues/5929) - Fixed an issue where the server is disconnected error message displayed if the user creates Macro with invalid SQL. +[Issue #5965](https://redmine.postgresql.org/issues/5965) - Ensure that the macro query result should be downloaded properly. +[Issue #5973](https://redmine.postgresql.org/issues/5973) - Added appropriate help message and a placeholder for letting users know about the account password expiry for Login/Group Role. +[Issue #5991](https://redmine.postgresql.org/issues/5991) - Ensure that dirty indicator (\*) should not be visible when renaming the tabs. +[Issue #5992](https://redmine.postgresql.org/issues/5992) - Fixed an issue where escape character is shown when the server/database name has some special characters. +[Issue #5997](https://redmine.postgresql.org/issues/5997) - Updated Flask-BabelEx to the latest. +[Issue #5998](https://redmine.postgresql.org/issues/5998) - Fixed an issue where schema diff doesn't show the result of compare if source schema has tables with RLS. +[Issue #6003](https://redmine.postgresql.org/issues/6003) - Fixed an issue where an illegal argument is showing for trigger SQL when a trigger is created for View. +[Issue #6045](https://redmine.postgresql.org/issues/6045) - Fixed autocomplete issue where it is not showing any suggestions if the schema name contains escape characters. +[Issue #6046](https://redmine.postgresql.org/issues/6046) - Fixed an issue where the state of the Save File icon does not match the dirty editor indicator. +[Issue #6047](https://redmine.postgresql.org/issues/6047) - Fixed an issue where the dirty indicator stays active even if all changes were undone. +[Issue #6058](https://redmine.postgresql.org/issues/6058) - Ensure that the rename panel should be disabled when the SQL file opened in the query tool. +[Issue #6061](https://redmine.postgresql.org/issues/6061) - Fixed extra parentheses issue around joins for Views. 
+[Issue #6065](https://redmine.postgresql.org/issues/6065) - Fixed accessibility issues in schema diff module. +[Issue #6069](https://redmine.postgresql.org/issues/6069) - Fixed an issue on refreshing files in Query Tool. +[Issue #6077](https://redmine.postgresql.org/issues/6077) - Fixed accessibility issues in various dialogs. +[Issue #6084](https://redmine.postgresql.org/issues/6084) - Fixed TypeError exception in schema diff when selected any identical object. +[Issue #6087](https://redmine.postgresql.org/issues/6087) - Fixed an issue where the dependencies tab showing multiple owners for the objects having shared dependencies. +[Issue #6098](https://redmine.postgresql.org/issues/6098) - Fixed an issue of deleting records when the user tries to delete multiple records. +[Issue #6120](https://redmine.postgresql.org/issues/6120) - Ensure that the user should be able to specify an older date for the account expiration of the role/user. +[Issue #6121](https://redmine.postgresql.org/issues/6121) - Fixed an issue where the database list in the new connection window is not visible. +[Issue #6122](https://redmine.postgresql.org/issues/6122) - Added informative message when there is no difference found for schema diff. +[Issue #6128](https://redmine.postgresql.org/issues/6128) - Fixed an issue where sequences are not created. +[Issue #6140](https://redmine.postgresql.org/issues/6140) - Ensure that verbose logs should be visible for Utility(Backup, Maintenance) jobs. +[Issue #6144](https://redmine.postgresql.org/issues/6144) - Ensure that the current value of the sequence should be ignored while comparing using schema diff. +[Issue #6157](https://redmine.postgresql.org/issues/6157) - Fixed an issue where strike-through is not visible for rows selected for deletion after scrolling. +[Issue #6178](https://redmine.postgresql.org/issues/6178) - Fixed an issue where the user unable to change the background color for a server. 
+[Issue #6187](https://redmine.postgresql.org/issues/6187) - Limit the upgrade check to run once per day. +[Issue #6208](https://redmine.postgresql.org/issues/6208) - Fixed an issue where utility(Backup, Maintenance, ...) jobs are failing when the log level is set to DEBUG. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/02_pem_release_notes_8_0.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/02_pem_release_notes_8_0.mdx index fc2e3266d85..9ce77545f54 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/02_pem_release_notes_8_0.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/02_pem_release_notes_8_0.mdx @@ -2,73 +2,71 @@ title: "PEM v8.0" --- - - Release date: 2020-12-09 ## Features -PEM-3669 - Support new configurations of BART ('bart\_socket\_name') in PEM -PEM-3612 - Enhanced the placeholders for alert script execution. \[Support Ticket \# 1051538\] -PEM-3613 - Added documentation for "How to replace alert placeholders inside script". \[Support Ticket \# 1051538\] +PEM-3669 - Support new configurations of BART ('bart_socket_name') in PEM +PEM-3612 - Enhanced the placeholders for alert script execution. \[Support Ticket # 1051538] +PEM-3613 - Added documentation for "How to replace alert placeholders inside script". \[Support Ticket # 1051538] PEM-3786 - Added support for --checksum-algorithm and --disable-checksum parameters introduced in BART 2.6.0. -PEM-2501 - Added alert details sql for template 'A user expires in N days'. \[Support Ticket \# 891377\] -PEM-3684, PEM-3665 - Added alert details sql for Alert Errors, Table bloat, Dead/live tuples and few other. \[Support Ticket \# 1065250\] +PEM-2501 - Added alert details sql for template 'A user expires in N days'. \[Support Ticket # 891377] +PEM-3684, PEM-3665 - Added alert details sql for Alert Errors, Table bloat, Dead/live tuples and few other. 
\[Support Ticket # 1065250] PEM-3802, PEM-3805 - Documented the best security practices for PEM to avoid security vulnerabilities. -PEM-3808 - Improved installtion and upgrade guide as per customer feedback. \[Support Ticket \# 1101462\] +PEM-3808 - Improved installation and upgrade guide as per customer feedback. \[Support Ticket # 1101462] PEM-3819 - Add webhooks for event-based alerting -[Issue \#1402](https://redmine.postgresql.org/issues/1402) - Added Macro support. -[Issue \#2519](https://redmine.postgresql.org/issues/2519) - Added support to view trigger function under the respective trigger node. -[Issue \#3318](https://redmine.postgresql.org/issues/3318) - Added support to download utility files at the client-side. -[Issue \#3794](https://redmine.postgresql.org/issues/3794) - Allow user to change the database connection from an open query tool tab. -[Issue \#4230](https://redmine.postgresql.org/issues/4230) - Added support to rename query tool and debugger tabs title. -[Issue \#4231](https://redmine.postgresql.org/issues/4231) - Added support for dynamic tab size. -[Issue \#4232](https://redmine.postgresql.org/issues/4232) - Added tab title placeholder for Query Tool, View/Edit Data, and Debugger. -[Issue \#5200](https://redmine.postgresql.org/issues/5200) - Added support to ignore the owner while comparing objects in the Schema Diff tool. -[Issue \#5857](https://redmine.postgresql.org/issues/5857) - Added documentation for Macro support. +[Issue #1402](https://redmine.postgresql.org/issues/1402) - Added Macro support. +[Issue #2519](https://redmine.postgresql.org/issues/2519) - Added support to view trigger function under the respective trigger node. +[Issue #3318](https://redmine.postgresql.org/issues/3318) - Added support to download utility files at the client-side. +[Issue #3794](https://redmine.postgresql.org/issues/3794) - Allow user to change the database connection from an open query tool tab. 
+[Issue #4230](https://redmine.postgresql.org/issues/4230) - Added support to rename query tool and debugger tabs title. +[Issue #4231](https://redmine.postgresql.org/issues/4231) - Added support for dynamic tab size. +[Issue #4232](https://redmine.postgresql.org/issues/4232) - Added tab title placeholder for Query Tool, View/Edit Data, and Debugger. +[Issue #5200](https://redmine.postgresql.org/issues/5200) - Added support to ignore the owner while comparing objects in the Schema Diff tool. +[Issue #5857](https://redmine.postgresql.org/issues/5857) - Added documentation for Macro support. ## Housekeeping -[Issue \#5328](https://redmine.postgresql.org/issues/5328) - Improve code coverage and API test cases for Foreign Tables. -[Issue \#5330](https://redmine.postgresql.org/issues/5330) - Improve code coverage and API test cases for Functions. -[Issue \#5337](https://redmine.postgresql.org/issues/5337) - Improve code coverage and API test cases for Views and Materialized Views. -[Issue \#5395](https://redmine.postgresql.org/issues/5395) - Added RESQL/MSQL test cases for Functions. -[Issue \#5497](https://redmine.postgresql.org/issues/5497) - Merged the latest code of 'pgcli' used for the autocomplete feature. -[Issue \#5938](https://redmine.postgresql.org/issues/5938) - Documentation of Storage Manager. +[Issue #5328](https://redmine.postgresql.org/issues/5328) - Improve code coverage and API test cases for Foreign Tables. +[Issue #5330](https://redmine.postgresql.org/issues/5330) - Improve code coverage and API test cases for Functions. +[Issue #5337](https://redmine.postgresql.org/issues/5337) - Improve code coverage and API test cases for Views and Materialized Views. +[Issue #5395](https://redmine.postgresql.org/issues/5395) - Added RESQL/MSQL test cases for Functions. +[Issue #5497](https://redmine.postgresql.org/issues/5497) - Merged the latest code of 'pgcli' used for the autocomplete feature. 
+[Issue #5938](https://redmine.postgresql.org/issues/5938) - Documentation of Storage Manager. ## Bug fixes -PEM-672 - Documented that while configuring the pem server, certificates must be present in data directory of backend database server in the installation guides. \[Support ticket \# 729238\] -PEM-3184 - Fixed an issue where connections were not getting released when user disconnects the database server. \[Support Ticket \# 969833\] -PEM-3737 - Fixed an issue where BART integration isn't working, when BART is installed at custom location. \[Support Ticket \# 1088574\] -PEM-3791 - Do not include detail alert information while sending SNMP traps. \[Support Ticket \# 1069206, \# 1115277\] -PEM-3816 - Fixed the issue of making two database connections for failed login attempt which result in locking the user profile. \[Support Ticket \# 1103288\] +PEM-672 - Documented that while configuring the pem server, certificates must be present in data directory of backend database server in the installation guides. \[Support ticket # 729238] +PEM-3184 - Fixed an issue where connections were not getting released when user disconnects the database server. \[Support Ticket # 969833] +PEM-3737 - Fixed an issue where BART integration isn't working, when BART is installed at custom location. \[Support Ticket # 1088574] +PEM-3791 - Do not include detail alert information while sending SNMP traps. \[Support Ticket # 1069206, # 1115277] +PEM-3816 - Fixed the issue of making two database connections for failed login attempt which result in locking the user profile. \[Support Ticket # 1103288] PEM-3795 - Added python3 as prerequisite for PEM in Upgrade and migration guide. -PEM-3799 - Package deployment and streaming replication deprecation warning is added in Upgrade and migration guide. \[Support ticket \# 1021617\] -[Issue \#4639](https://redmine.postgresql.org/issues/4639) - Ensure that some fields should be disabled for the trigger in edit mode. 
-[Issue \#4806](https://redmine.postgresql.org/issues/4806) - Added useful message when the explain plan is not used and empty. -[Issue \#4855](https://redmine.postgresql.org/issues/4855) - Fixed an issue where file extension is stripped on renaming a file. -[Issue \#5131](https://redmine.postgresql.org/issues/5131) - Ensure that 'ctrl + a' shortcut does not move the cursor in SQL editor. -[Issue \#5826](https://redmine.postgresql.org/issues/5826) - Fixed an issue where schema diff is showing identical table as different due to default vacuum settings. -[Issue \#5830](https://redmine.postgresql.org/issues/5830) - Fixed reverse engineering SQL where parenthesis is not properly arranged for View/MView definition. -[Issue \#5835](https://redmine.postgresql.org/issues/5835) - Fixed 'can't execute an empty query' message if the user change the option of Auto FK Index. -[Issue \#5841](https://redmine.postgresql.org/issues/5841) - Fixed an issue where the server is not able to connect using the service. -[Issue \#5842](https://redmine.postgresql.org/issues/5842) - Ensure that query history should be listed by date/time in descending order. -[Issue \#5843](https://redmine.postgresql.org/issues/5843) - Fixed an issue where the 'PARALLEL UNSAFE' option is missing from reverse engineering SQL of function/procedure. -[Issue \#5853](https://redmine.postgresql.org/issues/5853) - Fixed an issue where 'Rows X' column values were not visible properly for Explain Analyze in Dark theme. -[Issue \#5855](https://redmine.postgresql.org/issues/5855) - Ensure that the user should be able to change the start value of the existing sequence. -[Issue \#5858](https://redmine.postgresql.org/issues/5858) - Ensure that search object functionality works with case insensitive string. -[Issue \#5882](https://redmine.postgresql.org/issues/5882) - Fixed invalid literal issue when fetching dependencies for Materialized View. 
-[Issue \#5885](https://redmine.postgresql.org/issues/5885) - Fixed an issue where the user is unable to change the macro name. -[Issue \#5895](https://redmine.postgresql.org/issues/5895) - Fixed an issue where the suffix for Toast table size is not visible in the Statistics tab. -[Issue \#5911](https://redmine.postgresql.org/issues/5911) - Ensure that macros should be run on the older version of Safari and Chrome. -[Issue \#5914](https://redmine.postgresql.org/issues/5914) - Fixed an issue where a mismatch in the value of 'Estimated row' for functions. -[Issue \#5923](https://redmine.postgresql.org/issues/5923) - Fixed an issue where non-closeable tabs are getting closed. -[Issue \#5943](https://redmine.postgresql.org/issues/5943) - Ensure that folder rename should work properly in Storage Manager. -[Issue \#5950](https://redmine.postgresql.org/issues/5950) - Fixed an issue where a long file name is not visible on the process watcher dialog. -[Issue \#5953](https://redmine.postgresql.org/issues/5953) - Fixed an issue where connection to the server is on wait state if a different user is provided. -[Issue \#5959](https://redmine.postgresql.org/issues/5959) - Ensure that Grant Wizard should include foreign tables. -[Issue \#5974](https://redmine.postgresql.org/issues/5974) - Fixed an issue where the debugger's custom tab title not applied when opened in the new browser tab. -[Issue \#5978](https://redmine.postgresql.org/issues/5978) - Fixed an issue where dynamic tab title has not applied the first time for debugger panel. -[Issue \#5983](https://redmine.postgresql.org/issues/5983) - Added the appropriate server icon based on the server type in the new connection dialog. -[Issue \#5985](https://redmine.postgresql.org/issues/5985) - Fixed an issue where the process watcher dialog throws an error for the database server which is already removed. +PEM-3799 - Package deployment and streaming replication deprecation warning is added in Upgrade and migration guide. 
\[Support ticket # 1021617] +[Issue #4639](https://redmine.postgresql.org/issues/4639) - Ensure that some fields should be disabled for the trigger in edit mode. +[Issue #4806](https://redmine.postgresql.org/issues/4806) - Added useful message when the explain plan is not used and empty. +[Issue #4855](https://redmine.postgresql.org/issues/4855) - Fixed an issue where file extension is stripped on renaming a file. +[Issue #5131](https://redmine.postgresql.org/issues/5131) - Ensure that 'ctrl + a' shortcut does not move the cursor in SQL editor. +[Issue #5826](https://redmine.postgresql.org/issues/5826) - Fixed an issue where schema diff is showing identical table as different due to default vacuum settings. +[Issue #5830](https://redmine.postgresql.org/issues/5830) - Fixed reverse engineering SQL where parenthesis is not properly arranged for View/MView definition. +[Issue #5835](https://redmine.postgresql.org/issues/5835) - Fixed 'can't execute an empty query' message if the user change the option of Auto FK Index. +[Issue #5841](https://redmine.postgresql.org/issues/5841) - Fixed an issue where the server is not able to connect using the service. +[Issue #5842](https://redmine.postgresql.org/issues/5842) - Ensure that query history should be listed by date/time in descending order. +[Issue #5843](https://redmine.postgresql.org/issues/5843) - Fixed an issue where the 'PARALLEL UNSAFE' option is missing from reverse engineering SQL of function/procedure. +[Issue #5853](https://redmine.postgresql.org/issues/5853) - Fixed an issue where 'Rows X' column values were not visible properly for Explain Analyze in Dark theme. +[Issue #5855](https://redmine.postgresql.org/issues/5855) - Ensure that the user should be able to change the start value of the existing sequence. +[Issue #5858](https://redmine.postgresql.org/issues/5858) - Ensure that search object functionality works with case insensitive string. 
+[Issue #5882](https://redmine.postgresql.org/issues/5882) - Fixed invalid literal issue when fetching dependencies for Materialized View. +[Issue #5885](https://redmine.postgresql.org/issues/5885) - Fixed an issue where the user is unable to change the macro name. +[Issue #5895](https://redmine.postgresql.org/issues/5895) - Fixed an issue where the suffix for Toast table size is not visible in the Statistics tab. +[Issue #5911](https://redmine.postgresql.org/issues/5911) - Ensure that macros should be run on the older version of Safari and Chrome. +[Issue #5914](https://redmine.postgresql.org/issues/5914) - Fixed an issue where a mismatch in the value of 'Estimated row' for functions. +[Issue #5923](https://redmine.postgresql.org/issues/5923) - Fixed an issue where non-closeable tabs are getting closed. +[Issue #5943](https://redmine.postgresql.org/issues/5943) - Ensure that folder rename should work properly in Storage Manager. +[Issue #5950](https://redmine.postgresql.org/issues/5950) - Fixed an issue where a long file name is not visible on the process watcher dialog. +[Issue #5953](https://redmine.postgresql.org/issues/5953) - Fixed an issue where connection to the server is on wait state if a different user is provided. +[Issue #5959](https://redmine.postgresql.org/issues/5959) - Ensure that Grant Wizard should include foreign tables. +[Issue #5974](https://redmine.postgresql.org/issues/5974) - Fixed an issue where the debugger's custom tab title not applied when opened in the new browser tab. +[Issue #5978](https://redmine.postgresql.org/issues/5978) - Fixed an issue where dynamic tab title has not applied the first time for debugger panel. +[Issue #5983](https://redmine.postgresql.org/issues/5983) - Added the appropriate server icon based on the server type in the new connection dialog. 
+[Issue #5985](https://redmine.postgresql.org/issues/5985) - Fixed an issue where the process watcher dialog throws an error for the database server which is already removed. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/03_pem_release_notes_7_16.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/03_pem_release_notes_7_16.mdx index a03b70a458a..a44c81868a7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/03_pem_release_notes_7_16.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/03_pem_release_notes_7_16.mdx @@ -2,110 +2,108 @@ title: "PEM v7.16" --- - - Release date: 2020-09-30 ## Features -PEM-3157 - Documentation of Defining and Monitoring Postgres instances on AWS EC2 and RDS is added. \[Support Ticker \#1060981\] -PEM-322 - Use the same agent-id on agent registration using '--force-registration', and regenerate the certificates. \[Support Ticket \#695978\] +PEM-3157 - Documentation of Defining and Monitoring Postgres instances on AWS EC2 and RDS is added. \[Support Ticket #1060981] +PEM-322 - Use the same agent-id on agent registration using '--force-registration', and regenerate the certificates. \[Support Ticket #695978] PEM-688 - Added capability to monitor parameters set by EFM (missingnodes, minimumstandbys and membershipcoordinator). PEM-2200 - Auto discover database servers installed using debian packaging. -PEM-2578 - Allow user to schedule bulk alert blackout from PEM console. \[Support Ticket \#901007\] -PEM-2651 - List all the events and display error message on tooltip in BART activities graph. \[Support Ticket \#950623\] -PEM-3053 - Automate BART Obsolete backup Cleanup process in PEM. \[Support Ticket \#950518\] -PEM-3054 - Manage bart-scanner from PEM console. \[Support Ticket \#951184, \#1027171\] -PEM-3421 - Perform EFM cluster switchover from PEM console. 
\[Support Ticket \#1059731\] +PEM-2578 - Allow user to schedule bulk alert blackout from PEM console. \[Support Ticket #901007] +PEM-2651 - List all the events and display error message on tooltip in BART activities graph. \[Support Ticket #950623] +PEM-3053 - Automate BART Obsolete backup Cleanup process in PEM. \[Support Ticket #950518] +PEM-3054 - Manage bart-scanner from PEM console. \[Support Ticket #951184, #1027171] +PEM-3421 - Perform EFM cluster switchover from PEM console. \[Support Ticket #1059731] PEM-3528 - Make the current state and related information of the alerts available through REST API PEM-3529 - Make the state change history of the alerts available at Agent, Server & Database through REST API -PEM-3614 - Added details of directory creation and permissions for backups taken using BART in PEM - BART Management Features Guide \[Support Ticket \#1038375\] -PEM-3615 - Allow incremental backup from a parent backup which was taken in tar.gz format. \[Support Ticket \#1059420\] -[Issue \#2042](https://redmine.postgresql.org/issues/2042) - Added SQL Formatter support in Query Tool. -[Issue \#3904](https://redmine.postgresql.org/issues/3904) - Replace charting library Flotr2 with ChartJS using React on the Dashboard panel. -[Issue \#4059](https://redmine.postgresql.org/issues/4059) - Added a new button to the query tool toolbar to open a new query tool window. -[Issue \#5126](https://redmine.postgresql.org/issues/5126) - Modified schema diff tool to compare two databases instead of two schemas. -[Issue \#5653](https://redmine.postgresql.org/issues/5653) - Added High Contrast (Beta) theme support. -[Issue \#5772](https://redmine.postgresql.org/issues/5772) - Warn the user when connecting to a server that is older than PEM supports. 
+PEM-3614 - Added details of directory creation and permissions for backups taken using BART in PEM - BART Management Features Guide \[Support Ticket #1038375] +PEM-3615 - Allow incremental backup from a parent backup which was taken in tar.gz format. \[Support Ticket #1059420] +[Issue #2042](https://redmine.postgresql.org/issues/2042) - Added SQL Formatter support in Query Tool. +[Issue #3904](https://redmine.postgresql.org/issues/3904) - Replace charting library Flotr2 with ChartJS using React on the Dashboard panel. +[Issue #4059](https://redmine.postgresql.org/issues/4059) - Added a new button to the query tool toolbar to open a new query tool window. +[Issue #5126](https://redmine.postgresql.org/issues/5126) - Modified schema diff tool to compare two databases instead of two schemas. +[Issue #5653](https://redmine.postgresql.org/issues/5653) - Added High Contrast (Beta) theme support. +[Issue #5772](https://redmine.postgresql.org/issues/5772) - Warn the user when connecting to a server that is older than PEM supports. ## Housekeeping -[Issue \#5323](https://redmine.postgresql.org/issues/5323) - Improve code coverage and API test cases for Foreign Data Wrapper. -[Issue \#5324](https://redmine.postgresql.org/issues/5324) - Improve code coverage and API test cases for Foreign Servers and User Mappings. -[Issue \#5327](https://redmine.postgresql.org/issues/5327) - Improve code coverage and API test cases for Schemas. -[Issue \#5332](https://redmine.postgresql.org/issues/5332) - Improve code coverage and API test cases for Columns and Constraints (Index, Foreign Key, Check, Exclusion). -[Issue \#5336](https://redmine.postgresql.org/issues/5336) - Improve code coverage and API test cases for Types. -[Issue \#5344](https://redmine.postgresql.org/issues/5344) - Improve code coverage and API test cases for Grant Wizard. -[Issue \#5731](https://redmine.postgresql.org/issues/5731) - Upgrade font awesome from v4 to v5. 
-[Issue \#5774](https://redmine.postgresql.org/issues/5774) - Improve code coverage and API test cases for Tables. +[Issue #5323](https://redmine.postgresql.org/issues/5323) - Improve code coverage and API test cases for Foreign Data Wrapper. +[Issue #5324](https://redmine.postgresql.org/issues/5324) - Improve code coverage and API test cases for Foreign Servers and User Mappings. +[Issue #5327](https://redmine.postgresql.org/issues/5327) - Improve code coverage and API test cases for Schemas. +[Issue #5332](https://redmine.postgresql.org/issues/5332) - Improve code coverage and API test cases for Columns and Constraints (Index, Foreign Key, Check, Exclusion). +[Issue #5336](https://redmine.postgresql.org/issues/5336) - Improve code coverage and API test cases for Types. +[Issue #5344](https://redmine.postgresql.org/issues/5344) - Improve code coverage and API test cases for Grant Wizard. +[Issue #5731](https://redmine.postgresql.org/issues/5731) - Upgrade font awesome from v4 to v5. +[Issue #5774](https://redmine.postgresql.org/issues/5774) - Improve code coverage and API test cases for Tables. ## Bug fixes -PEM-812/ Issue \#5830 <https://redmine.postgresql.org/issues/5830> - Display parenthesis around AND/OR conditions in the same order within PEM and dba\_views. Fixed reverse engineering SQL where parenthesis is not properly arranged for View/MView definition. \[Support Ticket \#315838\] -PEM-2579 - Fixed issues in SNMP's MIB file reported by \[Support Ticket \#900679\] -PEM-3532 - Fixed the issue in monitoring dashboard line charts where data points were not showing correct local time information. \[Support Ticket \#1023887\] -PEM-3542 - Fixed potential data type migration problem when upgrading PEM Server to PG v.12 \[Support Ticket \#1044052\] +PEM-812/ Issue #5830 <https://redmine.postgresql.org/issues/5830> - Display parenthesis around AND/OR conditions in the same order within PEM and dba_views. Fixed reverse engineering SQL where parenthesis is not properly arranged for View/MView definition.
\[Support Ticket #315838] +PEM-2579 - Fixed issues in SNMP's MIB file reported by \[Support Ticket #900679] +PEM-3532 - Fixed the issue in monitoring dashboard line charts where data points were not showing correct local time information. \[Support Ticket #1023887] +PEM-3542 - Fixed potential data type migration problem when upgrading PEM Server to PG v.12 \[Support Ticket #1044052] PEM-3487 - Fixed configure script issue for google cloud instance running RHEL7 -PEM-3666 - Fixed security issue related to secure flag for session cookies. \[Support Ticket \#1035935\] -PEM-3667 - Fixed security issue related to information exposed in Server response header. \[Support Ticket \#1035935\] -PEM-3677 - Fixed security issue related to content Security Policy (CSP) configuration. \[Support Ticket \#1035935\] -PEM-3678 - Fixed security issue related to HTTP Strict Transport Security (HSTS) in server response header. \[Support Ticket \#1035935\] -PEM-3679 - Fixed security issue related to session fixation. \[Support Ticket \#1035935\] -PEM-3682 - Fixed security issue related to directory listing. \[Support Ticket \#1035935\] -PEM-3693 - Fixed security issue in the Capacity manager report. \[Support Ticket \#1035935\] -PEM-3695 - Transaction ID or timestamp must be provided during BART restore point-in time recovery operation \[Support Ticket \#1067393\] -PEM-3696 - Fixed security issue in the pem configure script where it should not log sensitive informations. \[Support Ticket \#1035935\] -PEM-3663 - Fixed pemworker segfault error in dmesg and messages file. \[Support Ticket \#999681\] -PEM-3698 - Fixed current and previous alert state related issue during alert script execution when alert state gets clear. \[Support Ticket \#1078828\] -PEM-2015 - Fixed vulnerability issues related to webserver. 
\[Support Ticket \# 856609\] -[Issue \#3767](https://redmine.postgresql.org/issues/3767) - Ensure that the original file format should be retained when saving the same file in SQL editor. -[Issue \#3791](https://redmine.postgresql.org/issues/3791) - Added missing comments in reverse engineering SQL for each column of a View. -[Issue \#4123](https://redmine.postgresql.org/issues/4123) - Fixed an issue where debugger doesn't work if the search path is set other than 'public'. -[Issue \#4216](https://redmine.postgresql.org/issues/4216) - Ensure that schema names starting with 'pg' should be visible in browser tree when standard\_conforming\_strings is set to off. -[Issue \#4361](https://redmine.postgresql.org/issues/4361) - Fixed ssh tunnel hang issue when the user tries to disconnect the server. -[Issue \#4387](https://redmine.postgresql.org/issues/4387) - Fixed an issue where the user is not able to insert the data if the table and columns name contains special characters. -[Issue \#4810](https://redmine.postgresql.org/issues/4810) - Fixed an issue where the user is not able to save the new row if the table is empty. -[Issue \#5137](https://redmine.postgresql.org/issues/5137) - Fixed save button enable issue when focusing in and out of numeric input field. -[Issue \#5417](https://redmine.postgresql.org/issues/5417) - Fixed and improve API test cases for the schema diff tool. -[Issue \#5426](https://redmine.postgresql.org/issues/5426) - Adjusted the height of jobstep code block to use maximum space. -[Issue \#5429](https://redmine.postgresql.org/issues/5429) - Ensure that the Dictionaries drop-down shows all the dictionaries in the FTS configuration dialog. -[Issue \#5526](https://redmine.postgresql.org/issues/5526) - Fixed an issue where copying and pasting a cell with multiple line data will result in multiple rows. -[Issue \#5530](https://redmine.postgresql.org/issues/5530) - Ensure that the referenced table should be displayed on foreign key constraints. 
-[Issue \#5567](https://redmine.postgresql.org/issues/5567) - Fixed an issue where conversion of bytea to the binary string results in an error. -[Issue \#5569](https://redmine.postgresql.org/issues/5569) - Fixed reverse engineered SQL for partitions when storage parameters are specified. -[Issue \#5604](https://redmine.postgresql.org/issues/5604) - Fixed an issue where the entire logs is in red text when the user runs backup and restore. -[Issue \#5632](https://redmine.postgresql.org/issues/5632) - Ensure that the user will be able to modify the start value of the Identity column. -[Issue \#5646](https://redmine.postgresql.org/issues/5646) - Ensure that RLS Policy node should be searchable using search object. -[Issue \#5652](https://redmine.postgresql.org/issues/5652) - Modified the 'Commit' and 'Rollback' query tool button icons. -[Issue \#5666](https://redmine.postgresql.org/issues/5666) - Added missing dependencies/dependent and corrected some wrongly identified. -[Issue \#5670](https://redmine.postgresql.org/issues/5670) - Fixed an issue where the error message does not have a close button on utility dialogs. -[Issue \#5675](https://redmine.postgresql.org/issues/5675) - Fixed CSRF errors when PEM opened in an iframe on safari browser. -[Issue \#5677](https://redmine.postgresql.org/issues/5677) - Fixed text color issue in explain analyze for the Dark theme. -[Issue \#5686](https://redmine.postgresql.org/issues/5686) - Fixed issue where the user was not able to update policy if the policy is created with space. -[Issue \#5689](https://redmine.postgresql.org/issues/5689) - Added the 'ORDER BY' clause for the privileges type to fix schema diff issue. -[Issue \#5710](https://redmine.postgresql.org/issues/5710) - Fixed an issue when comparing the table with a trigger throwing error in schema diff. -[Issue \#5713](https://redmine.postgresql.org/issues/5713) - Corrected DROP SQL syntax for catalog. 
-[Issue \#5716](https://redmine.postgresql.org/issues/5716) - Fixed an issue where ajax call continues to fire even after disconnect the database server. -[Issue \#5722](https://redmine.postgresql.org/issues/5722) - Ensure that the user should be able to drop the database even if it is connected. -[Issue \#5724](https://redmine.postgresql.org/issues/5724) - Clarify some of the differences when running in server mode in the docs. -[Issue \#5730](https://redmine.postgresql.org/issues/5730) - Resolve schema diff dependencies by selecting the appropriate node automatically and maintain the order in the generated script. -[Issue \#5732](https://redmine.postgresql.org/issues/5732) - Fixed some accessibility issues. -[Issue \#5734](https://redmine.postgresql.org/issues/5734) - Update the description of GIN and GiST indexes in the documentation. -[Issue \#5739](https://redmine.postgresql.org/issues/5739) - Ensure that the import/export feature should work with SSH Tunnel. -[Issue \#5746](https://redmine.postgresql.org/issues/5746) - Fixed an issue where --load-server does not allow loading connections that use pg\_services. -[Issue \#5748](https://redmine.postgresql.org/issues/5748) - Fixed incorrect reverse engineering SQL for Foreign key when creating a table. -[Issue \#5754](https://redmine.postgresql.org/issues/5754) - Fixed an issue where schema diff is not working when providing the options to Foreign Data Wrapper, Foreign Server, and User Mapping. -[Issue \#5764](https://redmine.postgresql.org/issues/5764) - Fixed SQL for Row Level Security which is incorrectly generated. -[Issue \#5765](https://redmine.postgresql.org/issues/5765) - Fixed an issue in the query tool when columns are having the same name as javascript object internal functions. -[Issue \#5766](https://redmine.postgresql.org/issues/5766) - Fixed string indices must be integers issue for PostgreSQL < 9.3. 
-[Issue \#5779](https://redmine.postgresql.org/issues/5779) - Remove illegal argument from trigger function in trigger DDL statement. -[Issue \#5794](https://redmine.postgresql.org/issues/5794) - Fixed excessive CPU usage by stopping the indefinite growth of the graph dataset. -[Issue \#5802](https://redmine.postgresql.org/issues/5802) - Remove maximum length on the password field in the server dialog. -[Issue \#5807](https://redmine.postgresql.org/issues/5807) - Fixed an issue where a column is renamed and then removed, then the drop SQL query takes the wrong column name. -[Issue \#5815](https://redmine.postgresql.org/issues/5815) - Fixed an issue where clicking on the 'Generate script' button shows a forever spinner due to pop up blocker. -[Issue \#5816](https://redmine.postgresql.org/issues/5816) - Ensure that the 'CREATE SCHEMA' statement should be present in the generated script if the schema is not present in the target database. -[Issue \#5820](https://redmine.postgresql.org/issues/5820) - Fixed an issue while refreshing Resource Group. -[Issue \#5833](https://redmine.postgresql.org/issues/5833) - Fixed an issue where custom sequences are not visible when show system objects are set to false. -[Issue \#5834](https://redmine.postgresql.org/issues/5834) - Ensure that the 'Remove Server Group' option is available in the context menu. -[Issue \#5839](https://redmine.postgresql.org/issues/5839) - Ensure that multiple extensions can be dropped from the properties tab. -[Issue \#5845](https://redmine.postgresql.org/issues/5845) - Fixed an issue where the query tool is not fetching more than 1000 rows for the table does not have any primary key. +PEM-3666 - Fixed security issue related to secure flag for session cookies. \[Support Ticket #1035935] +PEM-3667 - Fixed security issue related to information exposed in Server response header. \[Support Ticket #1035935] +PEM-3677 - Fixed security issue related to content Security Policy (CSP) configuration. 
\[Support Ticket #1035935] +PEM-3678 - Fixed security issue related to HTTP Strict Transport Security (HSTS) in server response header. \[Support Ticket #1035935] +PEM-3679 - Fixed security issue related to session fixation. \[Support Ticket #1035935] +PEM-3682 - Fixed security issue related to directory listing. \[Support Ticket #1035935] +PEM-3693 - Fixed security issue in the Capacity manager report. \[Support Ticket #1035935] +PEM-3695 - Transaction ID or timestamp must be provided during BART restore point-in-time recovery operation \[Support Ticket #1067393] +PEM-3696 - Fixed security issue in the pem configure script where it should not log sensitive information. \[Support Ticket #1035935] +PEM-3663 - Fixed pemworker segfault error in dmesg and messages file. \[Support Ticket #999681] +PEM-3698 - Fixed current and previous alert state related issue during alert script execution when alert state gets clear. \[Support Ticket #1078828] +PEM-2015 - Fixed vulnerability issues related to webserver. \[Support Ticket # 856609] +[Issue #3767](https://redmine.postgresql.org/issues/3767) - Ensure that the original file format should be retained when saving the same file in SQL editor. +[Issue #3791](https://redmine.postgresql.org/issues/3791) - Added missing comments in reverse engineering SQL for each column of a View. +[Issue #4123](https://redmine.postgresql.org/issues/4123) - Fixed an issue where debugger doesn't work if the search path is set other than 'public'. +[Issue #4216](https://redmine.postgresql.org/issues/4216) - Ensure that schema names starting with 'pg' should be visible in browser tree when standard_conforming_strings is set to off. +[Issue #4361](https://redmine.postgresql.org/issues/4361) - Fixed ssh tunnel hang issue when the user tries to disconnect the server. +[Issue #4387](https://redmine.postgresql.org/issues/4387) - Fixed an issue where the user is not able to insert the data if the table and columns name contains special characters.
+[Issue #4810](https://redmine.postgresql.org/issues/4810) - Fixed an issue where the user is not able to save the new row if the table is empty. +[Issue #5137](https://redmine.postgresql.org/issues/5137) - Fixed save button enable issue when focusing in and out of numeric input field. +[Issue #5417](https://redmine.postgresql.org/issues/5417) - Fixed and improve API test cases for the schema diff tool. +[Issue #5426](https://redmine.postgresql.org/issues/5426) - Adjusted the height of jobstep code block to use maximum space. +[Issue #5429](https://redmine.postgresql.org/issues/5429) - Ensure that the Dictionaries drop-down shows all the dictionaries in the FTS configuration dialog. +[Issue #5526](https://redmine.postgresql.org/issues/5526) - Fixed an issue where copying and pasting a cell with multiple line data will result in multiple rows. +[Issue #5530](https://redmine.postgresql.org/issues/5530) - Ensure that the referenced table should be displayed on foreign key constraints. +[Issue #5567](https://redmine.postgresql.org/issues/5567) - Fixed an issue where conversion of bytea to the binary string results in an error. +[Issue #5569](https://redmine.postgresql.org/issues/5569) - Fixed reverse engineered SQL for partitions when storage parameters are specified. +[Issue #5604](https://redmine.postgresql.org/issues/5604) - Fixed an issue where the entire logs is in red text when the user runs backup and restore. +[Issue #5632](https://redmine.postgresql.org/issues/5632) - Ensure that the user will be able to modify the start value of the Identity column. +[Issue #5646](https://redmine.postgresql.org/issues/5646) - Ensure that RLS Policy node should be searchable using search object. +[Issue #5652](https://redmine.postgresql.org/issues/5652) - Modified the 'Commit' and 'Rollback' query tool button icons. +[Issue #5666](https://redmine.postgresql.org/issues/5666) - Added missing dependencies/dependent and corrected some wrongly identified. 
+[Issue #5670](https://redmine.postgresql.org/issues/5670) - Fixed an issue where the error message does not have a close button on utility dialogs. +[Issue #5675](https://redmine.postgresql.org/issues/5675) - Fixed CSRF errors when PEM opened in an iframe on safari browser. +[Issue #5677](https://redmine.postgresql.org/issues/5677) - Fixed text color issue in explain analyze for the Dark theme. +[Issue #5686](https://redmine.postgresql.org/issues/5686) - Fixed issue where the user was not able to update policy if the policy is created with space. +[Issue #5689](https://redmine.postgresql.org/issues/5689) - Added the 'ORDER BY' clause for the privileges type to fix schema diff issue. +[Issue #5710](https://redmine.postgresql.org/issues/5710) - Fixed an issue when comparing the table with a trigger throwing error in schema diff. +[Issue #5713](https://redmine.postgresql.org/issues/5713) - Corrected DROP SQL syntax for catalog. +[Issue #5716](https://redmine.postgresql.org/issues/5716) - Fixed an issue where ajax call continues to fire even after disconnect the database server. +[Issue #5722](https://redmine.postgresql.org/issues/5722) - Ensure that the user should be able to drop the database even if it is connected. +[Issue #5724](https://redmine.postgresql.org/issues/5724) - Clarify some of the differences when running in server mode in the docs. +[Issue #5730](https://redmine.postgresql.org/issues/5730) - Resolve schema diff dependencies by selecting the appropriate node automatically and maintain the order in the generated script. +[Issue #5732](https://redmine.postgresql.org/issues/5732) - Fixed some accessibility issues. +[Issue #5734](https://redmine.postgresql.org/issues/5734) - Update the description of GIN and GiST indexes in the documentation. +[Issue #5739](https://redmine.postgresql.org/issues/5739) - Ensure that the import/export feature should work with SSH Tunnel. 
+[Issue #5746](https://redmine.postgresql.org/issues/5746) - Fixed an issue where --load-server does not allow loading connections that use pg_services. +[Issue #5748](https://redmine.postgresql.org/issues/5748) - Fixed incorrect reverse engineering SQL for Foreign key when creating a table. +[Issue #5754](https://redmine.postgresql.org/issues/5754) - Fixed an issue where schema diff is not working when providing the options to Foreign Data Wrapper, Foreign Server, and User Mapping. +[Issue #5764](https://redmine.postgresql.org/issues/5764) - Fixed SQL for Row Level Security which is incorrectly generated. +[Issue #5765](https://redmine.postgresql.org/issues/5765) - Fixed an issue in the query tool when columns are having the same name as javascript object internal functions. +[Issue #5766](https://redmine.postgresql.org/issues/5766) - Fixed string indices must be integers issue for PostgreSQL < 9.3. +[Issue #5779](https://redmine.postgresql.org/issues/5779) - Remove illegal argument from trigger function in trigger DDL statement. +[Issue #5794](https://redmine.postgresql.org/issues/5794) - Fixed excessive CPU usage by stopping the indefinite growth of the graph dataset. +[Issue #5802](https://redmine.postgresql.org/issues/5802) - Remove maximum length on the password field in the server dialog. +[Issue #5807](https://redmine.postgresql.org/issues/5807) - Fixed an issue where a column is renamed and then removed, then the drop SQL query takes the wrong column name. +[Issue #5815](https://redmine.postgresql.org/issues/5815) - Fixed an issue where clicking on the 'Generate script' button shows a forever spinner due to pop up blocker. +[Issue #5816](https://redmine.postgresql.org/issues/5816) - Ensure that the 'CREATE SCHEMA' statement should be present in the generated script if the schema is not present in the target database. +[Issue #5820](https://redmine.postgresql.org/issues/5820) - Fixed an issue while refreshing Resource Group. 
+[Issue #5833](https://redmine.postgresql.org/issues/5833) - Fixed an issue where custom sequences are not visible when show system objects are set to false. +[Issue #5834](https://redmine.postgresql.org/issues/5834) - Ensure that the 'Remove Server Group' option is available in the context menu. +[Issue #5839](https://redmine.postgresql.org/issues/5839) - Ensure that multiple extensions can be dropped from the properties tab. +[Issue #5845](https://redmine.postgresql.org/issues/5845) - Fixed an issue where the query tool is not fetching more than 1000 rows for the table does not have any primary key. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/04_pem_release_notes_7_15.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/04_pem_release_notes_7_15.mdx index 044eb7fa30f..7d31551776b 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/04_pem_release_notes_7_15.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/04_pem_release_notes_7_15.mdx @@ -2,81 +2,79 @@ title: "PEM v7.15" --- - - Release date: 2020-07-22 ## Features -PEM-2457 - Added support for schema level restriction. \[Support Ticket \#883404\] +PEM-2457 - Added support for schema level restriction. 
\[Support Ticket #883404] PEM-2540 - Added capability to monitor the xLogReceive parameter set by EFM PEM-3432 - Added support to monitor PG/EPAS 13 -PEM-3445 - Support new configurations of BART ('archive\_path', 'bart\_socket\_directory') in PEM \[Support Ticket \#1015972\] -PEM-3446 - Document SNMP trap oid detailed information used by PEM \[Support Ticket \#900679\] +PEM-3445 - Support new configurations of BART ('archive_path', 'bart_socket_directory') in PEM \[Support Ticket #1015972] +PEM-3446 - Document SNMP trap oid detailed information used by PEM \[Support Ticket #900679] PEM-3482 - Improve the performance diagnostics tool to show CPU usage for the active sessions along with the wait events for better performance analysis -[Issue \#5452](https://redmine.postgresql.org/issues/5452) - Added connected PEM user and connection name in the log file. -[Issue \#5468](https://redmine.postgresql.org/issues/5468) - Added option to ignore the whitespaces while comparing objects in schema diff. -[Issue \#5500](https://redmine.postgresql.org/issues/5500) - Added server group name while selecting servers in schema diff. -[Issue \#5516](https://redmine.postgresql.org/issues/5516) - Added support of Row Security Policies. -[Issue \#5576](https://redmine.postgresql.org/issues/5576) - Improve error messaging if the storage and log directories cannot be created. -[Issue \#5601](https://redmine.postgresql.org/issues/5601) - Added RLS Policy support in Schema Diff. -[Issue \#5622](https://redmine.postgresql.org/issues/5622) - Added support for permissive/restricted policy type while creating RLS Policy. +[Issue #5452](https://redmine.postgresql.org/issues/5452) - Added connected PEM user and connection name in the log file. +[Issue #5468](https://redmine.postgresql.org/issues/5468) - Added option to ignore the whitespaces while comparing objects in schema diff. 
+[Issue #5500](https://redmine.postgresql.org/issues/5500) - Added server group name while selecting servers in schema diff. +[Issue #5516](https://redmine.postgresql.org/issues/5516) - Added support of Row Security Policies. +[Issue #5576](https://redmine.postgresql.org/issues/5576) - Improve error messaging if the storage and log directories cannot be created. +[Issue #5601](https://redmine.postgresql.org/issues/5601) - Added RLS Policy support in Schema Diff. +[Issue #5622](https://redmine.postgresql.org/issues/5622) - Added support for permissive/restricted policy type while creating RLS Policy. ## Housekeeping -[Issue \#5325](https://redmine.postgresql.org/issues/5325) - Improve code coverage and API test cases for Collations. -[Issue \#5326](https://redmine.postgresql.org/issues/5326) - Improve code coverage and API test cases for Domain and Domain Constraints. -[Issue \#5329](https://redmine.postgresql.org/issues/5329) - Improve code coverage and API test cases for FTS Configuration, FTS Parser, FTS Dictionaries, and FTS Template. -[Issue \#5333](https://redmine.postgresql.org/issues/5333) - Improve code coverage and API test cases for Indexes. -[Issue \#5334](https://redmine.postgresql.org/issues/5334) - Improve code coverage and API test cases for the Rules module. -[Issue \#5335](https://redmine.postgresql.org/issues/5335) - Improve code coverage and API test cases for Triggers and Compound Triggers. -[Issue \#5455](https://redmine.postgresql.org/issues/5455) - Refactor PEM entrypoint python file so it can be imported and is a lot more readable. -[Issue \#5493](https://redmine.postgresql.org/issues/5493) - Search object UI improvements. -[Issue \#5581](https://redmine.postgresql.org/issues/5581) - Documentation of Row Level Security Policies. +[Issue #5325](https://redmine.postgresql.org/issues/5325) - Improve code coverage and API test cases for Collations. 
+[Issue #5326](https://redmine.postgresql.org/issues/5326) - Improve code coverage and API test cases for Domain and Domain Constraints. +[Issue #5329](https://redmine.postgresql.org/issues/5329) - Improve code coverage and API test cases for FTS Configuration, FTS Parser, FTS Dictionaries, and FTS Template. +[Issue #5333](https://redmine.postgresql.org/issues/5333) - Improve code coverage and API test cases for Indexes. +[Issue #5334](https://redmine.postgresql.org/issues/5334) - Improve code coverage and API test cases for the Rules module. +[Issue #5335](https://redmine.postgresql.org/issues/5335) - Improve code coverage and API test cases for Triggers and Compound Triggers. +[Issue #5455](https://redmine.postgresql.org/issues/5455) - Refactor PEM entrypoint python file so it can be imported and is a lot more readable. +[Issue #5493](https://redmine.postgresql.org/issues/5493) - Search object UI improvements. +[Issue #5581](https://redmine.postgresql.org/issues/5581) - Documentation of Row Level Security Policies. ## Bug fixes -PEM-699 - "Long running queries" alert should not log autovacuum queries. \[Support Ticket \#679920\] -PEM-3349 - pemworker register server help option should show the agent configuration directory path. \[Support Ticket \#1001212\] -PEM-1508 - Documented about calculation of shared system memory and removed it from capacity manager metrics as is always constant. \[809378\] -PEM-3322 - Documented about system jobs and their default schedules. \[990553\] -PEM-3436 - Updated all the alert templates queries which show negative values and improve the performance while fetching the data from history tables. \[992418\] -PEM-2490 - Supported Platforms and Versions link is added to Software prerequisites section in Installation guides. 
\[Support Ticket \#885334\] -PEM-3490 - Removed send\_email from POST/PUT payload and added validation for all\_low\_alert\_enable/high\_low\_alert\_enable/med\_low\_alert\_enable/low\_alert\_enable for their respective email group id. \[Support Ticket \#1005781\] -[Issue \#3591](https://redmine.postgresql.org/issues/3591) - Ensure that the query tool should display the proper error message while terminating the active session. -[Issue \#3669](https://redmine.postgresql.org/issues/3669) - Ensure that proper error should be displayed for the deleted node. -[Issue \#3694](https://redmine.postgresql.org/issues/3694) - Gracefully informed the user that the database is already connected when they click on "Connect Database...". -[Issue \#3787](https://redmine.postgresql.org/issues/3787) - Disabled the Stop process button after clicking it and added a message 'Terminating the process...' to notify the user. -[Issue \#3814](https://redmine.postgresql.org/issues/3814) - Fixed issue of error message not getting displayed when filename is empty for backup, restore, and import/export. -[Issue \#3851](https://redmine.postgresql.org/issues/3851) - Add proper indentation to the code while generating functions, procedures, and trigger functions. -[Issue \#4033](https://redmine.postgresql.org/issues/4033) - Fixed an issue where clicking on the cross button of the alert box on the login page is not working. -[Issue \#4099](https://redmine.postgresql.org/issues/4099) - Fixed the SQL help issue for EDB Postgres Advanced Server. -[Issue \#4223](https://redmine.postgresql.org/issues/4223) - Ensure that maintenance jobs should be worked properly for indexes under a materialized view. -[Issue \#4226](https://redmine.postgresql.org/issues/4226) - Fixed an issue where select all checkbox only selects the first 50 tables. 
-[Issue \#4235](https://redmine.postgresql.org/issues/4235) - Fixed tab indent issue on a selection of lines is deleting the content when 'use spaces == true' in the preferences. -[Issue \#4840](https://redmine.postgresql.org/issues/4840) - Ensure that 'With OID' option should be disabled while taking backup of database server version 12 and above. -[Issue \#5001](https://redmine.postgresql.org/issues/5001) - Fixed invalid literal issue when removing the connection limit for the existing role. -[Issue \#5287](https://redmine.postgresql.org/issues/5287) - Fixed dark theme-related CSS and modify the color codes. -[Issue \#5398](https://redmine.postgresql.org/issues/5398) - Fixed generated SQL issue for auto vacuum options. -[Issue \#5416](https://redmine.postgresql.org/issues/5416) - Ensure that the query tool panel gets closed when clicking on the 'Don't Save' button. -[Issue \#5422](https://redmine.postgresql.org/issues/5422) - Ensure that the dependencies tab shows correct information for Synonyms. -[Issue \#5434](https://redmine.postgresql.org/issues/5434) - Fixed an issue where the newly added table is not alphabetically added to the tree. -[Issue \#5440](https://redmine.postgresql.org/issues/5440) - Fixed list sorting issue in the schema diff tool. -[Issue \#5449](https://redmine.postgresql.org/issues/5449) - Fixed an issue while comparing the two identical schemas using the schema diff tool. -[Issue \#5450](https://redmine.postgresql.org/issues/5450) - Fixed an issue when renaming the column not added in the proper order. -[Issue \#5463](https://redmine.postgresql.org/issues/5463) - Fixed an issue where CSV download quotes numeric columns. -[Issue \#5465](https://redmine.postgresql.org/issues/5465) - Fixed an issue where the Edge browser version is showing wrong and warning message gets displayed. -[Issue \#5470](https://redmine.postgresql.org/issues/5470) - Fixed backgrid row hover issue where on hover background color is set for edit and delete cell only. 
-[Issue \#5481](https://redmine.postgresql.org/issues/5481) - Fixed data truncation issue when updating the data of type character with length. -[Issue \#5492](https://redmine.postgresql.org/issues/5492) - Fixed an issue where the search object is unable to locate inherited tables and constraint filters are not working. -[Issue \#5496](https://redmine.postgresql.org/issues/5496) - Fixed an issue where clicking on Select All button, not selecting all the options in pgAgent job scheduler. -[Issue \#5507](https://redmine.postgresql.org/issues/5507) - Fixed connection and version number detection issue when the database server is upgraded. -[Issue \#5539](https://redmine.postgresql.org/issues/5539) - Fixed typo in exception keyword. -[Issue \#5584](https://redmine.postgresql.org/issues/5584) - Fixed an issue where two identical tables showing different by schema diff tool. -[Issue \#5620](https://redmine.postgresql.org/issues/5620) - Fixed an issue while creating RLS Policy with the name having space. -[Issue \#5621](https://redmine.postgresql.org/issues/5621) - Remove extra brackets from reverse engineering SQL of RLS Policy. -[Issue \#5629](https://redmine.postgresql.org/issues/5629) - Fixed an issue where the user is able to edit properties when some of the collection nodes are selected. -[Issue \#5631](https://redmine.postgresql.org/issues/5631) - Fixed 'cant execute empty query' issue when remove the value of 'USING' or 'WITH CHECK' option of RLS Policy. -[Issue \#5633](https://redmine.postgresql.org/issues/5633) - Ensure that create RLS Policy menu should not be visible for catalog objects. -[Issue \#5647](https://redmine.postgresql.org/issues/5647) - Fixed an issue where difference DDL is showing the wrong SQL when changing the policy owner. -[Issue \#5673](https://redmine.postgresql.org/issues/5673) - Fixed an issue where fetching the schema throws an error if the database is not connected in Schema Diff. 
+PEM-699 - "Long running queries" alert should not log autovacuum queries. \[Support Ticket #679920] +PEM-3349 - pemworker register server help option should show the agent configuration directory path. \[Support Ticket #1001212] +PEM-1508 - Documented about calculation of shared system memory and removed it from capacity manager metrics as is always constant. \[809378] +PEM-3322 - Documented about system jobs and their default schedules. \[990553] +PEM-3436 - Updated all the alert templates queries which show negative values and improve the performance while fetching the data from history tables. \[992418] +PEM-2490 - Supported Platforms and Versions link is added to Software prerequisites section in Installation guides. \[Support Ticket #885334] +PEM-3490 - Removed send_email from POST/PUT payload and added validation for all_low_alert_enable/high_low_alert_enable/med_low_alert_enable/low_alert_enable for their respective email group id. \[Support Ticket #1005781] +[Issue #3591](https://redmine.postgresql.org/issues/3591) - Ensure that the query tool should display the proper error message while terminating the active session. +[Issue #3669](https://redmine.postgresql.org/issues/3669) - Ensure that proper error should be displayed for the deleted node. +[Issue #3694](https://redmine.postgresql.org/issues/3694) - Gracefully informed the user that the database is already connected when they click on "Connect Database...". +[Issue #3787](https://redmine.postgresql.org/issues/3787) - Disabled the Stop process button after clicking it and added a message 'Terminating the process...' to notify the user. +[Issue #3814](https://redmine.postgresql.org/issues/3814) - Fixed issue of error message not getting displayed when filename is empty for backup, restore, and import/export. +[Issue #3851](https://redmine.postgresql.org/issues/3851) - Add proper indentation to the code while generating functions, procedures, and trigger functions. 
+[Issue #4033](https://redmine.postgresql.org/issues/4033) - Fixed an issue where clicking on the cross button of the alert box on the login page is not working. +[Issue #4099](https://redmine.postgresql.org/issues/4099) - Fixed the SQL help issue for EDB Postgres Advanced Server. +[Issue #4223](https://redmine.postgresql.org/issues/4223) - Ensure that maintenance jobs should be worked properly for indexes under a materialized view. +[Issue #4226](https://redmine.postgresql.org/issues/4226) - Fixed an issue where select all checkbox only selects the first 50 tables. +[Issue #4235](https://redmine.postgresql.org/issues/4235) - Fixed tab indent issue on a selection of lines is deleting the content when 'use spaces == true' in the preferences. +[Issue #4840](https://redmine.postgresql.org/issues/4840) - Ensure that 'With OID' option should be disabled while taking backup of database server version 12 and above. +[Issue #5001](https://redmine.postgresql.org/issues/5001) - Fixed invalid literal issue when removing the connection limit for the existing role. +[Issue #5287](https://redmine.postgresql.org/issues/5287) - Fixed dark theme-related CSS and modify the color codes. +[Issue #5398](https://redmine.postgresql.org/issues/5398) - Fixed generated SQL issue for auto vacuum options. +[Issue #5416](https://redmine.postgresql.org/issues/5416) - Ensure that the query tool panel gets closed when clicking on the 'Don't Save' button. +[Issue #5422](https://redmine.postgresql.org/issues/5422) - Ensure that the dependencies tab shows correct information for Synonyms. +[Issue #5434](https://redmine.postgresql.org/issues/5434) - Fixed an issue where the newly added table is not alphabetically added to the tree. +[Issue #5440](https://redmine.postgresql.org/issues/5440) - Fixed list sorting issue in the schema diff tool. +[Issue #5449](https://redmine.postgresql.org/issues/5449) - Fixed an issue while comparing the two identical schemas using the schema diff tool. 
+[Issue #5450](https://redmine.postgresql.org/issues/5450) - Fixed an issue when renaming the column not added in the proper order. +[Issue #5463](https://redmine.postgresql.org/issues/5463) - Fixed an issue where CSV download quotes numeric columns. +[Issue #5465](https://redmine.postgresql.org/issues/5465) - Fixed an issue where the Edge browser version is showing wrong and warning message gets displayed. +[Issue #5470](https://redmine.postgresql.org/issues/5470) - Fixed backgrid row hover issue where on hover background color is set for edit and delete cell only. +[Issue #5481](https://redmine.postgresql.org/issues/5481) - Fixed data truncation issue when updating the data of type character with length. +[Issue #5492](https://redmine.postgresql.org/issues/5492) - Fixed an issue where the search object is unable to locate inherited tables and constraint filters are not working. +[Issue #5496](https://redmine.postgresql.org/issues/5496) - Fixed an issue where clicking on Select All button, not selecting all the options in pgAgent job scheduler. +[Issue #5507](https://redmine.postgresql.org/issues/5507) - Fixed connection and version number detection issue when the database server is upgraded. +[Issue #5539](https://redmine.postgresql.org/issues/5539) - Fixed typo in exception keyword. +[Issue #5584](https://redmine.postgresql.org/issues/5584) - Fixed an issue where two identical tables showing different by schema diff tool. +[Issue #5620](https://redmine.postgresql.org/issues/5620) - Fixed an issue while creating RLS Policy with the name having space. +[Issue #5621](https://redmine.postgresql.org/issues/5621) - Remove extra brackets from reverse engineering SQL of RLS Policy. +[Issue #5629](https://redmine.postgresql.org/issues/5629) - Fixed an issue where the user is able to edit properties when some of the collection nodes are selected. 
+[Issue #5631](https://redmine.postgresql.org/issues/5631) - Fixed 'cant execute empty query' issue when remove the value of 'USING' or 'WITH CHECK' option of RLS Policy. +[Issue #5633](https://redmine.postgresql.org/issues/5633) - Ensure that create RLS Policy menu should not be visible for catalog objects. +[Issue #5647](https://redmine.postgresql.org/issues/5647) - Fixed an issue where difference DDL is showing the wrong SQL when changing the policy owner. +[Issue #5673](https://redmine.postgresql.org/issues/5673) - Fixed an issue where fetching the schema throws an error if the database is not connected in Schema Diff. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/05_pem_release_notes_7_14.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/05_pem_release_notes_7_14.mdx index 00744a541f8..5843ead568e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/05_pem_release_notes_7_14.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/05_pem_release_notes_7_14.mdx @@ -2,110 +2,108 @@ title: "PEM v7.14" --- - - Release date: 2020-05-13 ## Features -PEM-799 - Allow multiple agents to send emails & SNMP traps by enabling the configurations 'enable\_smtp' & 'enable\_snmp' in agent configuration (agent.cfg) file. +PEM-799 - Allow multiple agents to send emails & SNMP traps by enabling the configurations 'enable_smtp' & 'enable_snmp' in agent configuration (agent.cfg) file. PEM-1593 - Introduced a new state 'Unmanaged' for the database servers for which are not being not monitored by PEM, but registered with it. 
-PEM-3283 - Modified the configuration script, installer & PEMAgent to generate certificates using 4096 bit key, and modified SSLUtils to sign certificates using SHA256 Hash \[Support Ticket \#989497\] -PEM-3283 - PEMAgent service will not get disabled after upgrading \[Support Ticket \#994563\] +PEM-3283 - Modified the configuration script, installer & PEMAgent to generate certificates using 4096 bit key, and modified SSLUtils to sign certificates using SHA256 Hash \[Support Ticket #989497] +PEM-3283 - PEMAgent service will not get disabled after upgrading \[Support Ticket #994563] PEM-3308 - Introduced new v3 version of REST API, which includes SNMP v3 support. -[Issue \#2172](https://redmine.postgresql.org/issues/2172) - Added search object functionality. -[Issue \#5154](https://redmine.postgresql.org/issues/5154) - Added accessibility support in AlertifyJS. -[Issue \#5179](https://redmine.postgresql.org/issues/5179) - Added Python 3.8 support. -[Issue \#5184](https://redmine.postgresql.org/issues/5184) - Added support for parameter toast\_tuple\_target and parallel\_workers of the table. -[Issue \#5261](https://redmine.postgresql.org/issues/5261) - Added support of Collation, FTS Configuration, FTS Dictionary, FTS Parser, and FTS Template to the Schema Diff. -[Issue \#5262](https://redmine.postgresql.org/issues/5262) - Added support of Domain, Domain Constraints and Types to the Schema Diff. -[Issue \#5263](https://redmine.postgresql.org/issues/5263) - Added support of Foreign Tables to the Schema Diff. -[Issue \#5264](https://redmine.postgresql.org/issues/5264) - Added support of Packages, Sequences and Synonyms to the Schema Diff. -[Issue \#5399](https://redmine.postgresql.org/issues/5399) - Warn the user if an unsupported, deprecated or unknown browser is detected. +[Issue #2172](https://redmine.postgresql.org/issues/2172) - Added search object functionality. 
+[Issue #5154](https://redmine.postgresql.org/issues/5154) - Added accessibility support in AlertifyJS. +[Issue #5179](https://redmine.postgresql.org/issues/5179) - Added Python 3.8 support. +[Issue #5184](https://redmine.postgresql.org/issues/5184) - Added support for parameter toast_tuple_target and parallel_workers of the table. +[Issue #5261](https://redmine.postgresql.org/issues/5261) - Added support of Collation, FTS Configuration, FTS Dictionary, FTS Parser, and FTS Template to the Schema Diff. +[Issue #5262](https://redmine.postgresql.org/issues/5262) - Added support of Domain, Domain Constraints and Types to the Schema Diff. +[Issue #5263](https://redmine.postgresql.org/issues/5263) - Added support of Foreign Tables to the Schema Diff. +[Issue #5264](https://redmine.postgresql.org/issues/5264) - Added support of Packages, Sequences and Synonyms to the Schema Diff. +[Issue #5399](https://redmine.postgresql.org/issues/5399) - Warn the user if an unsupported, deprecated or unknown browser is detected. ## Housekeeping -[Issue \#5133](https://redmine.postgresql.org/issues/5133) - Improvements in the UI for both default and dark themes. -[Issue \#5271](https://redmine.postgresql.org/issues/5271) - Enhance the color of switch control for both light and dark theme. -[Issue \#4620](https://redmine.postgresql.org/issues/4620) - Add Reverse Engineered and Modified SQL tests for procedures. +[Issue #5133](https://redmine.postgresql.org/issues/5133) - Improvements in the UI for both default and dark themes. +[Issue #5271](https://redmine.postgresql.org/issues/5271) - Enhance the color of switch control for both light and dark theme. +[Issue #4620](https://redmine.postgresql.org/issues/4620) - Add Reverse Engineered and Modified SQL tests for procedures. ## Bug fixes -PEM-965 - Fixed hanging issue after 'Enable Remote Monitoring?' prompted to user. \[Support Ticket \#771096\] +PEM-965 - Fixed hanging issue after 'Enable Remote Monitoring?' prompted to user. 
\[Support Ticket #771096] PEM-2500 - Corrected the version 3 of the certificate generated by sslutils -PEM-3129 - Update 'pem.bart\_backups' table with primary key as server id and backup id. \[Support Ticket \#972254\] -PEM-3183 - SQL profiler plugin was not able to load the profiler traces on ppcle machine \[Support Ticket \#991929\] +PEM-3129 - Update 'pem.bart_backups' table with primary key as server id and backup id. \[Support Ticket #972254] +PEM-3183 - SQL profiler plugin was not able to load the profiler traces on ppcle machine \[Support Ticket #991929] PEM-3202 - Allow user to open the job step logs in the new browser window as well as download it from schedule tasks. -PEM-3248 - Check for proper PEM schema version before running BART specific functions for backward compatibility \[Support Ticket \#981208\] -PEM-3272 - Fixed issue where SQL profiler filter dialog not able to display applied filter properly \[Support Ticket \#987042\] -PEM-3316 - Fixed PEMAgent service should not get disabled after upgrading \[Support Ticket \#994563\] -PEM-3323 - Fixed an issue where user was not able to change the email group from Agent dialog \[Support Ticket \#995734\] -[Issue \#1257](https://redmine.postgresql.org/issues/1257) - Ensure all object types have a "System XXX?" property. -[Issue \#2813](https://redmine.postgresql.org/issues/2813) - Ensure that the password prompt should not be visible if the database server is in trust authentication mode. -[Issue \#3495](https://redmine.postgresql.org/issues/3495) - Fixed an issue where the query tool unable to load the file which contains the BOM marker. -[Issue \#3523](https://redmine.postgresql.org/issues/3523) - Fixed an issue where right-clicking a browser object does not apply to the object on which right-click was fired. -[Issue \#3645](https://redmine.postgresql.org/issues/3645) - Ensure that the start and end date should be deleted when clear the selection for pgAgent Job. 
-[Issue \#3900](https://redmine.postgresql.org/issues/3900) - Added multiple drop/delete functionality for the table constraints. -[Issue \#3947](https://redmine.postgresql.org/issues/3947) - Fixed copy-paste row issues in View/Edit Data. -[Issue \#3972](https://redmine.postgresql.org/issues/3972) - Modified keyboard shortcuts in Query Tool for OSX native support. -[Issue \#3988](https://redmine.postgresql.org/issues/3988) - Fixed cursor disappeared issue in the query editor for some of the characters when zoomed out. -[Issue \#4180](https://redmine.postgresql.org/issues/4180) - Fixed mouse click issue where it does not select an object in Browser unless the pointer is over the object. -[Issue \#4237](https://redmine.postgresql.org/issues/4237) - Fix an issue where the user can not change the value of DateTime picker control using keyboard. -[Issue \#4440](https://redmine.postgresql.org/issues/4440) - Ensure the DROP statements in reverse engineered SQL are properly quoted for all objects. -[Issue \#4445](https://redmine.postgresql.org/issues/4445) - Ensure all object names in the title line of the reverse-engineered SQL are not quoted. -[Issue \#4504](https://redmine.postgresql.org/issues/4504) - Fixed an issue where like options should be disabled if the relation is not selected while creating a table. -[Issue \#4512](https://redmine.postgresql.org/issues/4512) - Fixed calendar opening issue on the exception tab inside the schedules tab of pgAgent. -[Issue \#4573](https://redmine.postgresql.org/issues/4573) - Ensure that if the delimiter is set other than comma then download the file as '.txt' file. -[Issue \#4608](https://redmine.postgresql.org/issues/4608) - Fixed some accessibility issues in the dialogs. -[Issue \#4684](https://redmine.postgresql.org/issues/4684) - Fixed encoding issue while saving data in encoded charset other than 'utf-8'. 
-[Issue \#4709](https://redmine.postgresql.org/issues/4709) - Added schema-qualified dictionary names in FTS configuration to avoid confusion of duplicate names. -[Issue \#4856](https://redmine.postgresql.org/issues/4856) - Enable the save button by default when a query tool is opened with CREATE or other scripts. -[Issue \#4858](https://redmine.postgresql.org/issues/4858) - Fixed python exception error when user tries to download the CSV and there is a connection issue. -[Issue \#4873](https://redmine.postgresql.org/issues/4873) - Fixed an issue when changing the comments of the procedure with arguments gives error in case of overloading. -[Issue \#4946](https://redmine.postgresql.org/issues/4946) - Fixed an issue when the user creates a temporary table with 'on commit drop as' clause. -[Issue \#4955](https://redmine.postgresql.org/issues/4955) - Changed the color of selected and hovered item for Select2 dropdown. -[Issue \#4957](https://redmine.postgresql.org/issues/4957) - Ensure that Constraint Trigger, Deferrable, Deferred option should be disabled when the user selects EDB-SPL function for the trigger. -[Issue \#4969](https://redmine.postgresql.org/issues/4969) - Fixed an issue where changing the values of columns with JSONB or JSON types to NULL. -[Issue \#4996](https://redmine.postgresql.org/issues/4996) - Improve the style of the highlighted code after query execution for Dark mode. -[Issue \#5007](https://redmine.postgresql.org/issues/5007) - Ensure index dropdown should have existing indexes while creating unique constraints. -[Issue \#5043](https://redmine.postgresql.org/issues/5043) - Fixed an issue where columns names should be visible in the order of their creation in the browser tree. -[Issue \#5053](https://redmine.postgresql.org/issues/5053) - Fixed an issue where changing the columns in the existing view throws an error. 
-[Issue \#5058](https://redmine.postgresql.org/issues/5058) - Ensure that AlertifyJS should not be visible as a title for alert dialog. -[Issue \#5077](https://redmine.postgresql.org/issues/5077) - Changed background pattern for geometry viewer to use \#fff for all themes. -[Issue \#5101](https://redmine.postgresql.org/issues/5101) - Fix an issue where debugger not showing all arguments anymore after hitting SQL error while debugging. -[Issue \#5115](https://redmine.postgresql.org/issues/5115) - Fix an issue where command and statements were parsed incorrectly for Rules. -[Issue \#5142](https://redmine.postgresql.org/issues/5142) - Ensure that all the transactions should be canceled before closing the connections when a server is disconnected using PEM. -[Issue \#5143](https://redmine.postgresql.org/issues/5143) - Fix accessibility issue for the maximize button of the Alertify dialog. -[Issue \#5157](https://redmine.postgresql.org/issues/5157) - Ensure that default sort order should be using the primary key in View/Edit data. -[Issue \#5180](https://redmine.postgresql.org/issues/5180) - Fixed an issue where the autovacuum\_enabled parameter is added automatically in the RE-SQL when the table has been created using the WITH clause. -[Issue \#5184](https://redmine.postgresql.org/issues/5184) - Fixed Firefox monospaced issue by updating the font to the latest version. -[Issue \#5213](https://redmine.postgresql.org/issues/5213) - Fixed an issue when the user performs refresh on a large size materialized view. -[Issue \#5214](https://redmine.postgresql.org/issues/5214) - Update Flask-SQLAlchemy and SQLAlchemy package which is not working on Windows with Python 3.8. -[Issue \#5215](https://redmine.postgresql.org/issues/5215) - Fix syntax error when changing the event type for the existing rule. -[Issue \#5221](https://redmine.postgresql.org/issues/5221) - Improve logic to get the DDL statements as a part of the comparison. 
-[Issue \#5227](https://redmine.postgresql.org/issues/5227) - Fixed an issue where user cannot be added if many users are already exists. -[Issue \#5241](https://redmine.postgresql.org/issues/5241) - Fixed tab key navigation issue for Grant Wizard. -[Issue \#5268](https://redmine.postgresql.org/issues/5268) - Fixed generated SQL when any token in FTS Configuration or any option in FTS Dictionary is changed. -[Issue \#5270](https://redmine.postgresql.org/issues/5270) - Ensure that OID should be shown in properties for Synonyms. -[Issue \#5275](https://redmine.postgresql.org/issues/5275) - Fixed tab key navigation issue for parameters in table dialog. -[Issue \#5279](https://redmine.postgresql.org/issues/5279) - Fixed Unicode character issue causing error on Python2 environment. -[Issue \#5302](https://redmine.postgresql.org/issues/5302) - Fixed an issue where difference SQL is not seen in the schema diff tool for Types. -[Issue \#5314](https://redmine.postgresql.org/issues/5314) - Ensure that switch cell is in sync with switch control for accessibility. -[Issue \#5315](https://redmine.postgresql.org/issues/5315) - Fixed an issue where schema diff showing changes in the identical domain constraints. -[Issue \#5350](https://redmine.postgresql.org/issues/5350) - Fixed an issue where schema diff marks an identical table as different. -[Issue \#5352](https://redmine.postgresql.org/issues/5352) - Fixed the rightmost and bottom tooltip crop issues in the explain query plan. -[Issue \#5356](https://redmine.postgresql.org/issues/5356) - Fixed modified SQL issue while adding an exception in pgAgent job schedule. -[Issue \#5361](https://redmine.postgresql.org/issues/5361) - Fixes an issue where PEM GUI does not display properly in IE 11. -[Issue \#5362](https://redmine.postgresql.org/issues/5362) - Fixed an issue where the identical packages and sequences visible as different in the schema diff tool. 
-[Issue \#5366](https://redmine.postgresql.org/issues/5366) - Added alert message to Reset Layout if any of the panels from Query Tool failed to load. -[Issue \#5371](https://redmine.postgresql.org/issues/5371) - Fixed tab key navigation for some dialogs. -[Issue \#5375](https://redmine.postgresql.org/issues/5375) - Fixed an issue where the Mode cell of argument grid does not appear completely in the Functions dialog. -[Issue \#5383](https://redmine.postgresql.org/issues/5383) - Fixed syntax error while refreshing the existing synonyms. -[Issue \#5387](https://redmine.postgresql.org/issues/5387) - Fixed an issue where the mode is not shown in the properties dialog of functions/procedures if all the arguments are "IN" arguments. -[Issue \#5396](https://redmine.postgresql.org/issues/5396) - Fixed an issue where the search object module unable to locate the object in the browser tree. -[Issue \#5400](https://redmine.postgresql.org/issues/5400) - Fixed internal server error when the database server is logged in with non-super user. -[Issue \#5401](https://redmine.postgresql.org/issues/5401) - Fixed search object issue when the object name contains special characters. -[Issue \#5410](https://redmine.postgresql.org/issues/5410) - Fixed an issue while removing the package body showing wrong modified SQL. -[Issue \#5441](https://redmine.postgresql.org/issues/5441) - Fixed an issue where the search object not able to locate pg\_[toast]()\* tables in the pg\_toast schema. -[Issue \#5415](https://redmine.postgresql.org/issues/5415) - Ensure that the query tool context menu should work on the collection nodes. -[Issue \#5447](https://redmine.postgresql.org/issues/5447) - Fixed failed to fetch utility error when click on refresh(any option) materialized view. 
+PEM-3248 - Check for proper PEM schema version before running BART specific functions for backward compatibility \[Support Ticket #981208] +PEM-3272 - Fixed issue where SQL profiler filter dialog not able to display applied filter properly \[Support Ticket #987042] +PEM-3316 - Fixed PEMAgent service should not get disabled after upgrading \[Support Ticket #994563] +PEM-3323 - Fixed an issue where user was not able to change the email group from Agent dialog \[Support Ticket #995734] +[Issue #1257](https://redmine.postgresql.org/issues/1257) - Ensure all object types have a "System XXX?" property. +[Issue #2813](https://redmine.postgresql.org/issues/2813) - Ensure that the password prompt should not be visible if the database server is in trust authentication mode. +[Issue #3495](https://redmine.postgresql.org/issues/3495) - Fixed an issue where the query tool unable to load the file which contains the BOM marker. +[Issue #3523](https://redmine.postgresql.org/issues/3523) - Fixed an issue where right-clicking a browser object does not apply to the object on which right-click was fired. +[Issue #3645](https://redmine.postgresql.org/issues/3645) - Ensure that the start and end date should be deleted when clear the selection for pgAgent Job. +[Issue #3900](https://redmine.postgresql.org/issues/3900) - Added multiple drop/delete functionality for the table constraints. +[Issue #3947](https://redmine.postgresql.org/issues/3947) - Fixed copy-paste row issues in View/Edit Data. +[Issue #3972](https://redmine.postgresql.org/issues/3972) - Modified keyboard shortcuts in Query Tool for OSX native support. +[Issue #3988](https://redmine.postgresql.org/issues/3988) - Fixed cursor disappeared issue in the query editor for some of the characters when zoomed out. +[Issue #4180](https://redmine.postgresql.org/issues/4180) - Fixed mouse click issue where it does not select an object in Browser unless the pointer is over the object. 
+[Issue #4237](https://redmine.postgresql.org/issues/4237) - Fix an issue where the user can not change the value of DateTime picker control using keyboard. +[Issue #4440](https://redmine.postgresql.org/issues/4440) - Ensure the DROP statements in reverse engineered SQL are properly quoted for all objects. +[Issue #4445](https://redmine.postgresql.org/issues/4445) - Ensure all object names in the title line of the reverse-engineered SQL are not quoted. +[Issue #4504](https://redmine.postgresql.org/issues/4504) - Fixed an issue where like options should be disabled if the relation is not selected while creating a table. +[Issue #4512](https://redmine.postgresql.org/issues/4512) - Fixed calendar opening issue on the exception tab inside the schedules tab of pgAgent. +[Issue #4573](https://redmine.postgresql.org/issues/4573) - Ensure that if the delimiter is set other than comma then download the file as '.txt' file. +[Issue #4608](https://redmine.postgresql.org/issues/4608) - Fixed some accessibility issues in the dialogs. +[Issue #4684](https://redmine.postgresql.org/issues/4684) - Fixed encoding issue while saving data in encoded charset other than 'utf-8'. +[Issue #4709](https://redmine.postgresql.org/issues/4709) - Added schema-qualified dictionary names in FTS configuration to avoid confusion of duplicate names. +[Issue #4856](https://redmine.postgresql.org/issues/4856) - Enable the save button by default when a query tool is opened with CREATE or other scripts. +[Issue #4858](https://redmine.postgresql.org/issues/4858) - Fixed python exception error when user tries to download the CSV and there is a connection issue. +[Issue #4873](https://redmine.postgresql.org/issues/4873) - Fixed an issue when changing the comments of the procedure with arguments gives error in case of overloading. +[Issue #4946](https://redmine.postgresql.org/issues/4946) - Fixed an issue when the user creates a temporary table with 'on commit drop as' clause. 
+[Issue #4955](https://redmine.postgresql.org/issues/4955) - Changed the color of selected and hovered item for Select2 dropdown. +[Issue #4957](https://redmine.postgresql.org/issues/4957) - Ensure that Constraint Trigger, Deferrable, Deferred option should be disabled when the user selects EDB-SPL function for the trigger. +[Issue #4969](https://redmine.postgresql.org/issues/4969) - Fixed an issue where changing the values of columns with JSONB or JSON types to NULL. +[Issue #4996](https://redmine.postgresql.org/issues/4996) - Improve the style of the highlighted code after query execution for Dark mode. +[Issue #5007](https://redmine.postgresql.org/issues/5007) - Ensure index dropdown should have existing indexes while creating unique constraints. +[Issue #5043](https://redmine.postgresql.org/issues/5043) - Fixed an issue where columns names should be visible in the order of their creation in the browser tree. +[Issue #5053](https://redmine.postgresql.org/issues/5053) - Fixed an issue where changing the columns in the existing view throws an error. +[Issue #5058](https://redmine.postgresql.org/issues/5058) - Ensure that AlertifyJS should not be visible as a title for alert dialog. +[Issue #5077](https://redmine.postgresql.org/issues/5077) - Changed background pattern for geometry viewer to use #fff for all themes. +[Issue #5101](https://redmine.postgresql.org/issues/5101) - Fix an issue where debugger not showing all arguments anymore after hitting SQL error while debugging. +[Issue #5115](https://redmine.postgresql.org/issues/5115) - Fix an issue where command and statements were parsed incorrectly for Rules. +[Issue #5142](https://redmine.postgresql.org/issues/5142) - Ensure that all the transactions should be canceled before closing the connections when a server is disconnected using PEM. +[Issue #5143](https://redmine.postgresql.org/issues/5143) - Fix accessibility issue for the maximize button of the Alertify dialog. 
+[Issue #5157](https://redmine.postgresql.org/issues/5157) - Ensure that default sort order should be using the primary key in View/Edit data. +[Issue #5180](https://redmine.postgresql.org/issues/5180) - Fixed an issue where the autovacuum_enabled parameter is added automatically in the RE-SQL when the table has been created using the WITH clause. +[Issue #5184](https://redmine.postgresql.org/issues/5184) - Fixed Firefox monospaced issue by updating the font to the latest version. +[Issue #5213](https://redmine.postgresql.org/issues/5213) - Fixed an issue when the user performs refresh on a large size materialized view. +[Issue #5214](https://redmine.postgresql.org/issues/5214) - Update Flask-SQLAlchemy and SQLAlchemy package which is not working on Windows with Python 3.8. +[Issue #5215](https://redmine.postgresql.org/issues/5215) - Fix syntax error when changing the event type for the existing rule. +[Issue #5221](https://redmine.postgresql.org/issues/5221) - Improve logic to get the DDL statements as a part of the comparison. +[Issue #5227](https://redmine.postgresql.org/issues/5227) - Fixed an issue where user cannot be added if many users are already exists. +[Issue #5241](https://redmine.postgresql.org/issues/5241) - Fixed tab key navigation issue for Grant Wizard. +[Issue #5268](https://redmine.postgresql.org/issues/5268) - Fixed generated SQL when any token in FTS Configuration or any option in FTS Dictionary is changed. +[Issue #5270](https://redmine.postgresql.org/issues/5270) - Ensure that OID should be shown in properties for Synonyms. +[Issue #5275](https://redmine.postgresql.org/issues/5275) - Fixed tab key navigation issue for parameters in table dialog. +[Issue #5279](https://redmine.postgresql.org/issues/5279) - Fixed Unicode character issue causing error on Python2 environment. +[Issue #5302](https://redmine.postgresql.org/issues/5302) - Fixed an issue where difference SQL is not seen in the schema diff tool for Types. 
+[Issue #5314](https://redmine.postgresql.org/issues/5314) - Ensure that switch cell is in sync with switch control for accessibility. +[Issue #5315](https://redmine.postgresql.org/issues/5315) - Fixed an issue where schema diff showing changes in the identical domain constraints. +[Issue #5350](https://redmine.postgresql.org/issues/5350) - Fixed an issue where schema diff marks an identical table as different. +[Issue #5352](https://redmine.postgresql.org/issues/5352) - Fixed the rightmost and bottom tooltip crop issues in the explain query plan. +[Issue #5356](https://redmine.postgresql.org/issues/5356) - Fixed modified SQL issue while adding an exception in pgAgent job schedule. +[Issue #5361](https://redmine.postgresql.org/issues/5361) - Fixed an issue where PEM GUI does not display properly in IE 11. +[Issue #5362](https://redmine.postgresql.org/issues/5362) - Fixed an issue where the identical packages and sequences are visible as different in the schema diff tool. +[Issue #5366](https://redmine.postgresql.org/issues/5366) - Added alert message to Reset Layout if any of the panels from Query Tool failed to load. +[Issue #5371](https://redmine.postgresql.org/issues/5371) - Fixed tab key navigation for some dialogs. +[Issue #5375](https://redmine.postgresql.org/issues/5375) - Fixed an issue where the Mode cell of argument grid does not appear completely in the Functions dialog. +[Issue #5383](https://redmine.postgresql.org/issues/5383) - Fixed syntax error while refreshing the existing synonyms. +[Issue #5387](https://redmine.postgresql.org/issues/5387) - Fixed an issue where the mode is not shown in the properties dialog of functions/procedures if all the arguments are "IN" arguments. +[Issue #5396](https://redmine.postgresql.org/issues/5396) - Fixed an issue where the search object module is unable to locate the object in the browser tree. 
+[Issue #5400](https://redmine.postgresql.org/issues/5400) - Fixed internal server error when the database server is logged in with non-super user. +[Issue #5401](https://redmine.postgresql.org/issues/5401) - Fixed search object issue when the object name contains special characters. +[Issue #5410](https://redmine.postgresql.org/issues/5410) - Fixed an issue while removing the package body showing wrong modified SQL. +[Issue #5441](https://redmine.postgresql.org/issues/5441) - Fixed an issue where the search object not able to locate pg\_[toast](<>)\* tables in the pg_toast schema. +[Issue #5415](https://redmine.postgresql.org/issues/5415) - Ensure that the query tool context menu should work on the collection nodes. +[Issue #5447](https://redmine.postgresql.org/issues/5447) - Fixed failed to fetch utility error when click on refresh(any option) materialized view. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/06_pem_release_notes_7_13.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/06_pem_release_notes_7_13.mdx index 8dd2f0baff4..d4139f7d4e7 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/06_pem_release_notes_7_13.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/06_pem_release_notes_7_13.mdx @@ -2,8 +2,6 @@ title: "PEM v7.13" --- - - Release date: 2020-02-20 ## Features @@ -11,64 +9,64 @@ Release date: 2020-02-20 PEM-594 - Logout the user session when there is no user activity. PEM-3107 - Core usage report. PEM-2794 - Added capability in pemAgent to send SNMP trap with SNMP v3 support. -[Issue \#2554](https://redmine.postgresql.org/issues/2554) - Added support for a multi-level partitioned table. -[Issue \#3452](https://redmine.postgresql.org/issues/3452) - Added a Schema Diff tool to compare two schemas and generate the diff script. -[Issue \#4762](https://redmine.postgresql.org/issues/4762) - Allow screen-reader to read label & description of non-textable elements. 
-[Issue \#4763](https://redmine.postgresql.org/issues/4763) - Allow screen-reader to identify the alert errors. -[Issue \#4764](https://redmine.postgresql.org/issues/4764) - Allow screen-reader to read relationship attributes in nested elements. -[Issue \#4770](https://redmine.postgresql.org/issues/4770) - Added labels and titles after parsing and validating all the web pages for accessibility. -[Issue \#4993](https://redmine.postgresql.org/issues/4993) - Set input controls as read-only instead of disabled will allow tab navigation in the properties tab and also allow screen readers to read it. +[Issue #2554](https://redmine.postgresql.org/issues/2554) - Added support for a multi-level partitioned table. +[Issue #3452](https://redmine.postgresql.org/issues/3452) - Added a Schema Diff tool to compare two schemas and generate the diff script. +[Issue #4762](https://redmine.postgresql.org/issues/4762) - Allow screen-reader to read label & description of non-textable elements. +[Issue #4763](https://redmine.postgresql.org/issues/4763) - Allow screen-reader to identify the alert errors. +[Issue #4764](https://redmine.postgresql.org/issues/4764) - Allow screen-reader to read relationship attributes in nested elements. +[Issue #4770](https://redmine.postgresql.org/issues/4770) - Added labels and titles after parsing and validating all the web pages for accessibility. +[Issue #4993](https://redmine.postgresql.org/issues/4993) - Set input controls as read-only instead of disabled will allow tab navigation in the properties tab and also allow screen readers to read it. ## Housekeeping -[Issue \#4988](https://redmine.postgresql.org/issues/4988) - Refactored SQL of Table's and it's child nodes. -[Issue \#5023](https://redmine.postgresql.org/issues/5023) - Refactored SQL of Views and Materialized Views. -[Issue \#5024](https://redmine.postgresql.org/issues/5024) - Refactored SQL of Functions and Procedures. 
-[Issue \#5038](https://redmine.postgresql.org/issues/5038) - Added support for on-demand loading of items in Select2. -[Issue \#5049](https://redmine.postgresql.org/issues/5049) - Improve code coverage and API test cases for the CAST module. -[Issue \#5050](https://redmine.postgresql.org/issues/5050) - Improve code coverage and API test cases for the LANGUAGE module. -[Issue \#5072](https://redmine.postgresql.org/issues/5072) - Updated wcDocker package which includes aria-label accessibility improvements. -[Issue \#5088](https://redmine.postgresql.org/issues/5088) - Improve code coverage and API test cases for the Event Trigger module. -[Issue \#5096](https://redmine.postgresql.org/issues/5096) - Replace node-sass with sass for SCSS compilation. -[Issue \#5176](https://redmine.postgresql.org/issues/5176) - Enhance logging by tracking stdout and stderr of subprocess when log level set to DEBUG. -[Issue \#5185](https://redmine.postgresql.org/issues/5185) - Added option to override the class name of a label tag for select2 control. +[Issue #4988](https://redmine.postgresql.org/issues/4988) - Refactored SQL of Table's and it's child nodes. +[Issue #5023](https://redmine.postgresql.org/issues/5023) - Refactored SQL of Views and Materialized Views. +[Issue #5024](https://redmine.postgresql.org/issues/5024) - Refactored SQL of Functions and Procedures. +[Issue #5038](https://redmine.postgresql.org/issues/5038) - Added support for on-demand loading of items in Select2. +[Issue #5049](https://redmine.postgresql.org/issues/5049) - Improve code coverage and API test cases for the CAST module. +[Issue #5050](https://redmine.postgresql.org/issues/5050) - Improve code coverage and API test cases for the LANGUAGE module. +[Issue #5072](https://redmine.postgresql.org/issues/5072) - Updated wcDocker package which includes aria-label accessibility improvements. 
+[Issue #5088](https://redmine.postgresql.org/issues/5088) - Improve code coverage and API test cases for the Event Trigger module. +[Issue #5096](https://redmine.postgresql.org/issues/5096) - Replace node-sass with sass for SCSS compilation. +[Issue #5176](https://redmine.postgresql.org/issues/5176) - Enhance logging by tracking stdout and stderr of subprocess when log level set to DEBUG. +[Issue #5185](https://redmine.postgresql.org/issues/5185) - Added option to override the class name of a label tag for select2 control. ## Bug fixes PEM-383 - Provide the column picker toolbar button in SQL profiler trace window. -PEM-723 - Fixed a SQL Syntax error when PEM Agent queries tablespaces. \[Support Ticket \#570926\] -PEM-789 - Fixed historical span issue with memory usage graph. \[Support Ticket \#665140\] -PEM-933 - Fixed sorting of table chart data when rendering on custom dashboard. \[Support Ticket \#764647\] -PEM-1406 - Fixed the Swap Consumption alert template to reflect the actual swap consumption instead of total swap space. \[Support Ticket \#806008\] -PEM-2255 - Fixed in PEM RestAPI for server black out option. \[Support Ticket \#882589\] -PEM-2545/Issue \#4511 <https://redmine.postgresql.org/issues/4511> - Grant wizard is not able to handle multiple objects because all objects are passed as query string URL \[Support Ticket \#897419\] -PEM-2573 - Fixed for shared\_buffers dashboard's historical span issue. \[Support Ticket \#894867\] -PEM-2614 - Fixed idle in transaction (aborted) state check in the probe code for session info. \[Support Ticket \#908442\] -PEM-2647 - Fixed storage pie chart issue to display the correct value. \[Support Ticket \#907753\] -PEM-2686 - Fixed save password functionality is not working when server was registered by PEM worker. \[Support Ticket \#912948\] -PEM-2828 - Fixed "Alert Errors" alert template to count the errors for which alerts are enabled. 
\[Support Ticket \#903272\] -PEM-3041 - Fixed not able to see the agent status in the PEM Monitoring dashboard due to numeric overflow error. \[Support Ticket \#934778\] -PEM-3139 - RLS policy was not updated properly for "pem.job" table during the PEM upgrade. \[Support Ticket \#937823\] -[Issue \#3812](https://redmine.postgresql.org/issues/3812) - Ensure that path file name should not disappear when changing ext from the dropdown in file explorer dialog. -[Issue \#4198](https://redmine.postgresql.org/issues/4198) - Fix syntax highlighting in code mirror for backslash and escape constant. -[Issue \#4410](https://redmine.postgresql.org/issues/4410) - Fixed an issue while editing char\[\] or character varying\[\] column from View/Edit data throwing an error. -[Issue \#4506](https://redmine.postgresql.org/issues/4506) - Fix an issue where clicking on an empty textbox like fill factor or comments, considers it as change and enabled the save button. -[Issue \#4601](https://redmine.postgresql.org/issues/4601) - Added tab navigation on close buttons for all the panels and create/properties dialog. -[Issue \#4633](https://redmine.postgresql.org/issues/4633) - Added support to view multilevel partitioned tables. -[Issue \#4724](https://redmine.postgresql.org/issues/4724) - Fix network disconnect issue while establishing the connection via SSH Tunnel and it impossible to expand the Servers node. -[Issue \#4818](https://redmine.postgresql.org/issues/4818) - Fix server connection drops out issue in query tool. -[Issue \#4827](https://redmine.postgresql.org/issues/4827) - Fix column resizable issue in the file explorer dialog. -[Issue \#4842](https://redmine.postgresql.org/issues/4842) - Ensure that constraints, indexes, rules, triggers, and compound triggers should be created on partitions. -[Issue \#4926](https://redmine.postgresql.org/issues/4926) - Fix VPN network disconnect issue where it hangs on expanding the Servers node. 
-[Issue \#4933](https://redmine.postgresql.org/issues/4933) - Ensure that the Servers collection node should expand independently of server connections. -[Issue \#4943](https://redmine.postgresql.org/issues/4943) - Added more information to the 'Database connected/disconnected' message. -[Issue \#5000](https://redmine.postgresql.org/issues/5000) - Logout the session when no user activity of mouse move, click or keypress. -[Issue \#5008](https://redmine.postgresql.org/issues/5008) - Ensure that the error message should not be displayed if Tablespace is not selected while creating the index. -[Issue \#5009](https://redmine.postgresql.org/issues/5009) - Fix an issue where operator, access method and operator class is not visible for exclusion constraints. -[Issue \#5025](https://redmine.postgresql.org/issues/5025) - Fix an issue where setting STORAGE\_DIR to empty should show all the volumes on Windows in server mode. -[Issue \#5047](https://redmine.postgresql.org/issues/5047) - Added tab navigation for tabs under explain panel in query tool. -[Issue \#5065](https://redmine.postgresql.org/issues/5065) - Updated the incorrect icon used for the cast node on refresh. -[Issue \#5066](https://redmine.postgresql.org/issues/5066) - Fix an issue where refreshing a package results in the change in the object completely. -[Issue \#5074](https://redmine.postgresql.org/issues/5074) - Fix an issue where select, insert and update scripts on tables throwing an error. -[Issue \#5107](https://redmine.postgresql.org/issues/5107) - Set proper focus on tab navigation for file manager dialog. -[Issue \#5116](https://redmine.postgresql.org/issues/5116) - Fixed an issue where Save Password control disappears after clicking on it while creating a server. +PEM-723 - Fixed a SQL Syntax error when PEM Agent queries tablespaces. \[Support Ticket #570926] +PEM-789 - Fixed historical span issue with memory usage graph. 
\[Support Ticket #665140] +PEM-933 - Fixed sorting of table chart data when rendering on custom dashboard. \[Support Ticket #764647] +PEM-1406 - Fixed the Swap Consumption alert template to reflect the actual swap consumption instead of total swap space. \[Support Ticket #806008] +PEM-2255 - Fixed in PEM RestAPI for server black out option. \[Support Ticket #882589] +PEM-2545/Issue #4511 <> - Grant wizard is not able to handle multiple objects because all objects are passed as query string URL \[Support Ticket #897419] +PEM-2573 - Fixed for shared_buffers dashboard's historical span issue. \[Support Ticket #894867] +PEM-2614 - Fixed idle in transaction (aborted) state check in the probe code for session info. \[Support Ticket #908442] +PEM-2647 - Fixed storage pie chart issue to display the correct value. \[Support Ticket #907753] +PEM-2686 - Fixed save password functionality is not working when server was registered by PEM worker. \[Support Ticket #912948] +PEM-2828 - Fixed "Alert Errors" alert template to count the errors for which alerts are enabled. \[Support Ticket #903272] +PEM-3041 - Fixed not able to see the agent status in the PEM Monitoring dashboard due to numeric overflow error. \[Support Ticket #934778] +PEM-3139 - RLS policy was not updated properly for "pem.job" table during the PEM upgrade. \[Support Ticket #937823] +[Issue #3812](https://redmine.postgresql.org/issues/3812) - Ensure that path file name should not disappear when changing ext from the dropdown in file explorer dialog. +[Issue #4198](https://redmine.postgresql.org/issues/4198) - Fix syntax highlighting in code mirror for backslash and escape constant. +[Issue #4410](https://redmine.postgresql.org/issues/4410) - Fixed an issue while editing char\[] or character varying\[] column from View/Edit data throwing an error. 
+[Issue #4506](https://redmine.postgresql.org/issues/4506) - Fix an issue where clicking on an empty textbox like fill factor or comments, considers it as change and enabled the save button. +[Issue #4601](https://redmine.postgresql.org/issues/4601) - Added tab navigation on close buttons for all the panels and create/properties dialog. +[Issue #4633](https://redmine.postgresql.org/issues/4633) - Added support to view multilevel partitioned tables. +[Issue #4724](https://redmine.postgresql.org/issues/4724) - Fix network disconnect issue while establishing the connection via SSH Tunnel and it impossible to expand the Servers node. +[Issue #4818](https://redmine.postgresql.org/issues/4818) - Fix server connection drops out issue in query tool. +[Issue #4827](https://redmine.postgresql.org/issues/4827) - Fix column resizable issue in the file explorer dialog. +[Issue #4842](https://redmine.postgresql.org/issues/4842) - Ensure that constraints, indexes, rules, triggers, and compound triggers should be created on partitions. +[Issue #4926](https://redmine.postgresql.org/issues/4926) - Fix VPN network disconnect issue where it hangs on expanding the Servers node. +[Issue #4933](https://redmine.postgresql.org/issues/4933) - Ensure that the Servers collection node should expand independently of server connections. +[Issue #4943](https://redmine.postgresql.org/issues/4943) - Added more information to the 'Database connected/disconnected' message. +[Issue #5000](https://redmine.postgresql.org/issues/5000) - Logout the session when no user activity of mouse move, click or keypress. +[Issue #5008](https://redmine.postgresql.org/issues/5008) - Ensure that the error message should not be displayed if Tablespace is not selected while creating the index. +[Issue #5009](https://redmine.postgresql.org/issues/5009) - Fix an issue where operator, access method and operator class is not visible for exclusion constraints. 
+[Issue #5025](https://redmine.postgresql.org/issues/5025) - Fix an issue where setting STORAGE_DIR to empty should show all the volumes on Windows in server mode. +[Issue #5047](https://redmine.postgresql.org/issues/5047) - Added tab navigation for tabs under explain panel in query tool. +[Issue #5065](https://redmine.postgresql.org/issues/5065) - Updated the incorrect icon used for the cast node on refresh. +[Issue #5066](https://redmine.postgresql.org/issues/5066) - Fix an issue where refreshing a package results in the change in the object completely. +[Issue #5074](https://redmine.postgresql.org/issues/5074) - Fix an issue where select, insert and update scripts on tables throwing an error. +[Issue #5107](https://redmine.postgresql.org/issues/5107) - Set proper focus on tab navigation for file manager dialog. +[Issue #5116](https://redmine.postgresql.org/issues/5116) - Fixed an issue where Save Password control disappears after clicking on it while creating a server. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/07_pem_release_notes_7_12.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/07_pem_release_notes_7_12.mdx index b3a5ee8bfcb..9b9728470f4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/07_pem_release_notes_7_12.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/07_pem_release_notes_7_12.mdx @@ -2,77 +2,75 @@ title: "PEM v7.12" --- - - Release date: 2019-12-02 ## Features PEM-2477 - Allow the pemAgent to create a password-less SSH authentication between two linux systems. User can now choose to create it from the database server properties dialog, and backup restore dialog. -PEM-2848 - Allow the pemAgent to set/override the 'archive\_command' of the database server configuration using 'BART INIT' command. User can choose to set/override the 'archive\_command' from the database server properties dialog. 
-[Issue \#1974](https://redmine.postgresql.org/issues/1974) - Added encrypted password in reverse engineered SQL for roles. -[Issue \#3741](https://redmine.postgresql.org/issues/3741) - Added Dark (Beta) UI Theme option. -[Issue \#4006](https://redmine.postgresql.org/issues/4006) - Support Enable Always and Enable Replica on triggers. -[Issue \#4351](https://redmine.postgresql.org/issues/4351) - Add an option to request confirmation before cancelling changes on a Properties dialog. -[Issue \#4396](https://redmine.postgresql.org/issues/4396) - Warn the user on changing the definition of Materialized View about the loss of data and its dependent objects. -[Issue \#4435](https://redmine.postgresql.org/issues/4435) - Allow drag and drop functionality for all the nodes under the database node, excluding collection nodes. -[Issue \#4711](https://redmine.postgresql.org/issues/4711) - Use a 'play' icon for the Execute Query button in the Query Tool for greater consistency with other applications. -[Issue \#4772](https://redmine.postgresql.org/issues/4772) - Added aria-label to provide an invisible label where a visible label cannot be used. -[Issue \#4773](https://redmine.postgresql.org/issues/4773) - Added role="status" attribute to all the status messages for accessibility. -[Issue \#4990](https://redmine.postgresql.org/issues/4990) - Changed the open query tool and data filter icons. +PEM-2848 - Allow the pemAgent to set/override the 'archive_command' of the database server configuration using 'BART INIT' command. User can choose to set/override the 'archive_command' from the database server properties dialog. +[Issue #1974](https://redmine.postgresql.org/issues/1974) - Added encrypted password in reverse engineered SQL for roles. +[Issue #3741](https://redmine.postgresql.org/issues/3741) - Added Dark (Beta) UI Theme option. +[Issue #4006](https://redmine.postgresql.org/issues/4006) - Support Enable Always and Enable Replica on triggers. 
+[Issue #4351](https://redmine.postgresql.org/issues/4351) - Add an option to request confirmation before cancelling changes on a Properties dialog. +[Issue #4396](https://redmine.postgresql.org/issues/4396) - Warn the user on changing the definition of Materialized View about the loss of data and its dependent objects. +[Issue #4435](https://redmine.postgresql.org/issues/4435) - Allow drag and drop functionality for all the nodes under the database node, excluding collection nodes. +[Issue #4711](https://redmine.postgresql.org/issues/4711) - Use a 'play' icon for the Execute Query button in the Query Tool for greater consistency with other applications. +[Issue #4772](https://redmine.postgresql.org/issues/4772) - Added aria-label to provide an invisible label where a visible label cannot be used. +[Issue #4773](https://redmine.postgresql.org/issues/4773) - Added role="status" attribute to all the status messages for accessibility. +[Issue #4990](https://redmine.postgresql.org/issues/4990) - Changed the open query tool and data filter icons. ## Housekeeping -[Issue \#4696](https://redmine.postgresql.org/issues/4696) - Add Reverse Engineered and Modified SQL tests for Materialized Views. -[Issue \#4701](https://redmine.postgresql.org/issues/4701) - Optimize Webpack to improve overall performance. -[Issue \#4807](https://redmine.postgresql.org/issues/4807) - Refactored code of table and it's child nodes. -[Issue \#4938](https://redmine.postgresql.org/issues/4938) - Refactored code of columns node. +[Issue #4696](https://redmine.postgresql.org/issues/4696) - Add Reverse Engineered and Modified SQL tests for Materialized Views. +[Issue #4701](https://redmine.postgresql.org/issues/4701) - Optimize Webpack to improve overall performance. +[Issue #4807](https://redmine.postgresql.org/issues/4807) - Refactored code of table and it's child nodes. +[Issue #4938](https://redmine.postgresql.org/issues/4938) - Refactored code of columns node. 
## Bug fixes -[Issue \#3130](https://redmine.postgresql.org/issues/3130) - Ensure create new object dialog should be opened when alt+shift+n key is pressed on the collection node. -[Issue \#3279](https://redmine.postgresql.org/issues/3279) - Fixed issue where Drop and Disconnect connection menu points are too close to each other. -[Issue \#3538](https://redmine.postgresql.org/issues/3538) - Fix issue where the Reset button does not get enabled till all the mandatory fields are provided in the dialog. -[Issue \#3789](https://redmine.postgresql.org/issues/3789) - Ensure context menus never get hidden below the menu bar. -[Issue \#3859](https://redmine.postgresql.org/issues/3859) - Rename the context menu from 'Drop Server' to 'Remove Server'. -[Issue \#3913](https://redmine.postgresql.org/issues/3913) - Ensure the correct "running at" agent is shown when a pgAgent job is executing. -[Issue \#3915](https://redmine.postgresql.org/issues/3915) - Fix an issue in the Query Tool where shortcut keys could be ignored following a query error. -[Issue \#3999](https://redmine.postgresql.org/issues/3999) - Fix the toggle case shortcut key combination. -[Issue \#4191](https://redmine.postgresql.org/issues/4191) - Ensure comments are shown in reverse engineered SQL for table partitions. -[Issue \#4242](https://redmine.postgresql.org/issues/4242) - Handle NULL values appropriately when sorting backgrid tables. -[Issue \#4341](https://redmine.postgresql.org/issues/4341) - Give appropriate error messages when the user tries to use an blank master password. -[Issue \#4451](https://redmine.postgresql.org/issues/4451) - Remove arbitrary (and incorrect) requirement that composite types must have at least two members. -[Issue \#4459](https://redmine.postgresql.org/issues/4459) - Don't quote bigints when copying them from the Query Tool results grid. 
-[Issue \#4482](https://redmine.postgresql.org/issues/4482) - Ensure compression level is passed to pg\_dump when backing up in directory format. -[Issue \#4483](https://redmine.postgresql.org/issues/4483) - Ensure the number of jobs can be specified when backing up in directory format. -[Issue \#4516](https://redmine.postgresql.org/issues/4516) - Remove the sorting of table headers with no labels. -[Issue \#4564](https://redmine.postgresql.org/issues/4564) - Ensure Javascript errors during Query Tool execution are reported as such and not as Ajax errors. -[Issue \#4610](https://redmine.postgresql.org/issues/4610) - Suppress Enter key presses in Alertify dialogues when the come from Select2 controls to allow item selection with Enter. -[Issue \#4647](https://redmine.postgresql.org/issues/4647) - Ensure that units are respected when sorting by file size in the File dialog. -[Issue \#4659](https://redmine.postgresql.org/issues/4659) - Updated documentation for default privileges to clarify more on the grantor. -[Issue \#4674](https://redmine.postgresql.org/issues/4674) - Fix query tool launch error if user name contains HTML characters. It's a regression. -[Issue \#4730](https://redmine.postgresql.org/issues/4730) - Ensure all messages are retained in the Query Tool from long running queries. -[Issue \#4734](https://redmine.postgresql.org/issues/4734) - Updated documentation for the delete row button that only strikeout the row instead of deleting it. -[Issue \#4761](https://redmine.postgresql.org/issues/4761) - Fix an issue where the wrong type is displayed when changing the datatype from timestamp with time zone to timestamp without time zone. -[Issue \#4779](https://redmine.postgresql.org/issues/4779) - Updated documentation for the query tool toolbar buttons. -[Issue \#4792](https://redmine.postgresql.org/issues/4792) - Ensure that the superuser should be able to create database, as the superuser overrides all the access restrictions. 
-[Issue \#4878](https://redmine.postgresql.org/issues/4878) - Ensure that the superuser should be able to create role, as the superuser overrides all the access restrictions. -[Issue \#4893](https://redmine.postgresql.org/issues/4893) - Fix reverse engineering SQL issue for partitions when specifying digits as comments. -[Issue \#4896](https://redmine.postgresql.org/issues/4896) - Fixed an issue where escape key not working to close the open/save file dialog. -[Issue \#4906](https://redmine.postgresql.org/issues/4906) - Fixed an issue where keyboard shortcut for context menu is not working when using Firefox on CentOS7. -[Issue \#4923](https://redmine.postgresql.org/issues/4923) - Enhance the logic to change the label from 'Delete/Drop' to 'Remove' for the server and server group node. -[Issue \#4925](https://redmine.postgresql.org/issues/4925) - Shown some text on process watcher till the initial logs are loaded. -[Issue \#4930](https://redmine.postgresql.org/issues/4930) - Fix main window tab navigation accessibility issue. -[Issue \#4935](https://redmine.postgresql.org/issues/4935) - Fix accessibility issues. -[Issue \#4947](https://redmine.postgresql.org/issues/4947) - Fix XSS issue in explain and explain analyze for table and type which contain HTML. -[Issue \#4952](https://redmine.postgresql.org/issues/4952) - Fix an issue of retrieving properties for Compound Triggers. It's a regression of \#4006. -[Issue \#4953](https://redmine.postgresql.org/issues/4953) - Fix an issue where PEM unable to retrieve table node if the trigger is already disabled and the user clicks on Enable All. -[Issue \#4958](https://redmine.postgresql.org/issues/4958) - Fix reverse engineering SQL issue for triggers when passed a single argument to trigger function. -[Issue \#4964](https://redmine.postgresql.org/issues/4964) - Fix an issue where length and precision are not removed from table/column dialog. 
-[Issue \#4965](https://redmine.postgresql.org/issues/4965) - Fix an issue where the Interval data type is not displayed in the properties dialog of table/column. -[Issue \#4966](https://redmine.postgresql.org/issues/4966) - Fix 'Could not find the object on the server.' error while refreshing the check constraint. -[Issue \#4975](https://redmine.postgresql.org/issues/4975) - Fix issue where the user can not switch the UI language. It's a regression of \#4348. -[Issue \#4976](https://redmine.postgresql.org/issues/4976) - Fix reverse engineering SQL issue where when clause is not visible for PG/EPAS 12. -[Issue \#4982](https://redmine.postgresql.org/issues/4982) - Added statistics and storage information in reverse engineering SQL of table/column. -[Issue \#4985](https://redmine.postgresql.org/issues/4985) - Fix an issue where the inherited table name with quotes did not escape correctly. -[Issue \#4991](https://redmine.postgresql.org/issues/4991) - Fix an issue where context menu is open along with submenu and the focus is not on context menu or submenu. +[Issue #3130](https://redmine.postgresql.org/issues/3130) - Ensure create new object dialog should be opened when alt+shift+n key is pressed on the collection node. +[Issue #3279](https://redmine.postgresql.org/issues/3279) - Fixed issue where Drop and Disconnect connection menu points are too close to each other. +[Issue #3538](https://redmine.postgresql.org/issues/3538) - Fix issue where the Reset button does not get enabled till all the mandatory fields are provided in the dialog. +[Issue #3789](https://redmine.postgresql.org/issues/3789) - Ensure context menus never get hidden below the menu bar. +[Issue #3859](https://redmine.postgresql.org/issues/3859) - Rename the context menu from 'Drop Server' to 'Remove Server'. +[Issue #3913](https://redmine.postgresql.org/issues/3913) - Ensure the correct "running at" agent is shown when a pgAgent job is executing. 
+[Issue #3915](https://redmine.postgresql.org/issues/3915) - Fix an issue in the Query Tool where shortcut keys could be ignored following a query error. +[Issue #3999](https://redmine.postgresql.org/issues/3999) - Fix the toggle case shortcut key combination. +[Issue #4191](https://redmine.postgresql.org/issues/4191) - Ensure comments are shown in reverse engineered SQL for table partitions. +[Issue #4242](https://redmine.postgresql.org/issues/4242) - Handle NULL values appropriately when sorting backgrid tables. +[Issue #4341](https://redmine.postgresql.org/issues/4341) - Give appropriate error messages when the user tries to use an blank master password. +[Issue #4451](https://redmine.postgresql.org/issues/4451) - Remove arbitrary (and incorrect) requirement that composite types must have at least two members. +[Issue #4459](https://redmine.postgresql.org/issues/4459) - Don't quote bigints when copying them from the Query Tool results grid. +[Issue #4482](https://redmine.postgresql.org/issues/4482) - Ensure compression level is passed to pg_dump when backing up in directory format. +[Issue #4483](https://redmine.postgresql.org/issues/4483) - Ensure the number of jobs can be specified when backing up in directory format. +[Issue #4516](https://redmine.postgresql.org/issues/4516) - Remove the sorting of table headers with no labels. +[Issue #4564](https://redmine.postgresql.org/issues/4564) - Ensure Javascript errors during Query Tool execution are reported as such and not as Ajax errors. +[Issue #4610](https://redmine.postgresql.org/issues/4610) - Suppress Enter key presses in Alertify dialogues when the come from Select2 controls to allow item selection with Enter. +[Issue #4647](https://redmine.postgresql.org/issues/4647) - Ensure that units are respected when sorting by file size in the File dialog. +[Issue #4659](https://redmine.postgresql.org/issues/4659) - Updated documentation for default privileges to clarify more on the grantor. 
+[Issue #4674](https://redmine.postgresql.org/issues/4674) - Fix query tool launch error if user name contains HTML characters. It's a regression. +[Issue #4730](https://redmine.postgresql.org/issues/4730) - Ensure all messages are retained in the Query Tool from long running queries. +[Issue #4734](https://redmine.postgresql.org/issues/4734) - Updated documentation for the delete row button that only strikeout the row instead of deleting it. +[Issue #4761](https://redmine.postgresql.org/issues/4761) - Fix an issue where the wrong type is displayed when changing the datatype from timestamp with time zone to timestamp without time zone. +[Issue #4779](https://redmine.postgresql.org/issues/4779) - Updated documentation for the query tool toolbar buttons. +[Issue #4792](https://redmine.postgresql.org/issues/4792) - Ensure that the superuser should be able to create database, as the superuser overrides all the access restrictions. +[Issue #4878](https://redmine.postgresql.org/issues/4878) - Ensure that the superuser should be able to create role, as the superuser overrides all the access restrictions. +[Issue #4893](https://redmine.postgresql.org/issues/4893) - Fix reverse engineering SQL issue for partitions when specifying digits as comments. +[Issue #4896](https://redmine.postgresql.org/issues/4896) - Fixed an issue where escape key not working to close the open/save file dialog. +[Issue #4906](https://redmine.postgresql.org/issues/4906) - Fixed an issue where keyboard shortcut for context menu is not working when using Firefox on CentOS7. +[Issue #4923](https://redmine.postgresql.org/issues/4923) - Enhance the logic to change the label from 'Delete/Drop' to 'Remove' for the server and server group node. +[Issue #4925](https://redmine.postgresql.org/issues/4925) - Shown some text on process watcher till the initial logs are loaded. +[Issue #4930](https://redmine.postgresql.org/issues/4930) - Fix main window tab navigation accessibility issue. 
+[Issue #4935](https://redmine.postgresql.org/issues/4935) - Fix accessibility issues. +[Issue #4947](https://redmine.postgresql.org/issues/4947) - Fix XSS issue in explain and explain analyze for table and type which contain HTML. +[Issue #4952](https://redmine.postgresql.org/issues/4952) - Fix an issue of retrieving properties for Compound Triggers. It's a regression of #4006. +[Issue #4953](https://redmine.postgresql.org/issues/4953) - Fix an issue where PEM unable to retrieve table node if the trigger is already disabled and the user clicks on Enable All. +[Issue #4958](https://redmine.postgresql.org/issues/4958) - Fix reverse engineering SQL issue for triggers when passed a single argument to trigger function. +[Issue #4964](https://redmine.postgresql.org/issues/4964) - Fix an issue where length and precision are not removed from table/column dialog. +[Issue #4965](https://redmine.postgresql.org/issues/4965) - Fix an issue where the Interval data type is not displayed in the properties dialog of table/column. +[Issue #4966](https://redmine.postgresql.org/issues/4966) - Fix 'Could not find the object on the server.' error while refreshing the check constraint. +[Issue #4975](https://redmine.postgresql.org/issues/4975) - Fix issue where the user can not switch the UI language. It's a regression of #4348. +[Issue #4976](https://redmine.postgresql.org/issues/4976) - Fix reverse engineering SQL issue where when clause is not visible for PG/EPAS 12. +[Issue #4982](https://redmine.postgresql.org/issues/4982) - Added statistics and storage information in reverse engineering SQL of table/column. +[Issue #4985](https://redmine.postgresql.org/issues/4985) - Fix an issue where the inherited table name with quotes did not escape correctly. +[Issue #4991](https://redmine.postgresql.org/issues/4991) - Fix an issue where context menu is open along with submenu and the focus is not on context menu or submenu. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/08_pem_release_notes_7_11.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/08_pem_release_notes_7_11.mdx index 924e36c9846..096412e8a7d 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/08_pem_release_notes_7_11.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/08_pem_release_notes_7_11.mdx @@ -2,27 +2,25 @@ title: "PEM v7.11" --- - - Release date: 2019-12-18 ## Features -[Issue \#4778](https://redmine.postgresql.org/issues/4778) - Implemented the Query Plan Analyser. +[Issue #4778](https://redmine.postgresql.org/issues/4778) - Implemented the Query Plan Analyser. ## Housekeeping ## Bug fixes -PEM-2793 - User with 'super\_pem\_admin' role should be able to see all servers and agents. (922716) +PEM-2793 - User with 'super_pem_admin' role should be able to see all servers and agents. (922716) PEM-2827 - Fixed a security risk - PEMAgent used to run the SHELL/BATCH script(s), defined as the step(s) of user job(s), alert script(s) and custom batch probes, as root user on non-windows system and as an Administrator on Windows. -[Issue \#3386](https://redmine.postgresql.org/issues/3386) - Ensure backup a partition table should not backup the whole database. -[Issue \#4590](https://redmine.postgresql.org/issues/4590) - Fix issue where backup fails for schema name that needs quoting. -[Issue \#4728](https://redmine.postgresql.org/issues/4728) - Highlighted the color of closing or opening parenthesis when user select them in CodeMirror. -[Issue \#4751](https://redmine.postgresql.org/issues/4751) - Fix issue where export job fails when deselecting all the columns. -[Issue \#4753](https://redmine.postgresql.org/issues/4753) - Fix an error where 'false' string is displayed when we add a new parameter in the Parameters tab, also clear the old value when the user changes the parameter name. 
-[Issue \#4760](https://redmine.postgresql.org/issues/4760) - Ensure the search path should not be quoted for Database. -[Issue \#4780](https://redmine.postgresql.org/issues/4780) - Ensure the search path should not be quoted for Function, Procedure and Trigger Function. -[Issue \#4791](https://redmine.postgresql.org/issues/4791) - Fix issue where VALID foreign keys show as NOT VALID in the SQL tab for tables. -[Issue \#4845](https://redmine.postgresql.org/issues/4845) - Fixed potential error in the properties dialog for the Code tab. -[Issue \#4850](https://redmine.postgresql.org/issues/4850) - Fixed an issue where Datetimepicker control opens when clicking on the label. +[Issue #3386](https://redmine.postgresql.org/issues/3386) - Ensure backup a partition table should not backup the whole database. +[Issue #4590](https://redmine.postgresql.org/issues/4590) - Fix issue where backup fails for schema name that needs quoting. +[Issue #4728](https://redmine.postgresql.org/issues/4728) - Highlighted the color of closing or opening parenthesis when user select them in CodeMirror. +[Issue #4751](https://redmine.postgresql.org/issues/4751) - Fix issue where export job fails when deselecting all the columns. +[Issue #4753](https://redmine.postgresql.org/issues/4753) - Fix an error where 'false' string is displayed when we add a new parameter in the Parameters tab, also clear the old value when the user changes the parameter name. +[Issue #4760](https://redmine.postgresql.org/issues/4760) - Ensure the search path should not be quoted for Database. +[Issue #4780](https://redmine.postgresql.org/issues/4780) - Ensure the search path should not be quoted for Function, Procedure and Trigger Function. +[Issue #4791](https://redmine.postgresql.org/issues/4791) - Fix issue where VALID foreign keys show as NOT VALID in the SQL tab for tables. +[Issue #4845](https://redmine.postgresql.org/issues/4845) - Fixed potential error in the properties dialog for the Code tab. 
+[Issue #4850](https://redmine.postgresql.org/issues/4850) - Fixed an issue where Datetimepicker control opens when clicking on the label. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/09_pem_release_notes_7_10.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/09_pem_release_notes_7_10.mdx index 86db8d4b1ce..19915f8c308 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/09_pem_release_notes_7_10.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/09_pem_release_notes_7_10.mdx @@ -2,106 +2,104 @@ title: "PEM v7.10" --- - - Release date: 2019-10-09 ## Features -Issue \#PEM-952 - Allow to configure and manage the BART server through PEM (limited functionalities) -Issue \#PEM-2088 - Allow a banner to be displayed on the login and other related pages showing custom text. (869690) -Issue \#PEM-2152 - Allow to manage and monitor PostgreSQL 12 and EDB Postgres Advanced Server 12. -[Issue \#3009](https://redmine.postgresql.org/issues/3009) - Added Copy with headers functionality when copy data from Query Tool/View Data. -[Issue \#4144](https://redmine.postgresql.org/issues/4144) - Add support of Compound Triggers for EPAS 12+. -[Issue \#4139](https://redmine.postgresql.org/issues/4139) - Allow some objects to be dragged/dropped into the Query Tool to insert their signature into the query text. -[Issue \#4333](https://redmine.postgresql.org/issues/4333) - Add support for planner support functions in PostgreSQL 12+ functions. -[Issue \#4334](https://redmine.postgresql.org/issues/4334) - Add support for generated columns in Postgres 12+. -[Issue \#4540](https://redmine.postgresql.org/issues/4540) - Use the full tab space for CodeMirror instances on dialogues where appropriate. -[Issue \#4566](https://redmine.postgresql.org/issues/4566) - Allow enhanced cookie protection to be disabled for compatibility with dynamically addressed hosting environments. 
-[Issue \#4570](https://redmine.postgresql.org/issues/4570) - Add an optimisation to the internal code responsible for searching for treeview nodes. -[Issue \#4574](https://redmine.postgresql.org/issues/4574) - Display the row count in the popup message when counting table rows, not just in the properties list. -[Issue \#4612](https://redmine.postgresql.org/issues/4612) - Add support in query history to show internal queries generated by pgAdmin during save data operations. -[Issue \#4667](https://redmine.postgresql.org/issues/4667) - Ensure editable and read-only columns in Query Tool should be identified by icons and tooltips in the column header. +Issue #PEM-952 - Allow to configure and manage the BART server through PEM (limited functionalities) +Issue #PEM-2088 - Allow a banner to be displayed on the login and other related pages showing custom text. (869690) +Issue #PEM-2152 - Allow to manage and monitor PostgreSQL 12 and EDB Postgres Advanced Server 12. +[Issue #3009](https://redmine.postgresql.org/issues/3009) - Added Copy with headers functionality when copy data from Query Tool/View Data. +[Issue #4144](https://redmine.postgresql.org/issues/4144) - Add support of Compound Triggers for EPAS 12+. +[Issue #4139](https://redmine.postgresql.org/issues/4139) - Allow some objects to be dragged/dropped into the Query Tool to insert their signature into the query text. +[Issue #4333](https://redmine.postgresql.org/issues/4333) - Add support for planner support functions in PostgreSQL 12+ functions. +[Issue #4334](https://redmine.postgresql.org/issues/4334) - Add support for generated columns in Postgres 12+. +[Issue #4540](https://redmine.postgresql.org/issues/4540) - Use the full tab space for CodeMirror instances on dialogues where appropriate. +[Issue #4566](https://redmine.postgresql.org/issues/4566) - Allow enhanced cookie protection to be disabled for compatibility with dynamically addressed hosting environments. 
+[Issue #4570](https://redmine.postgresql.org/issues/4570) - Add an optimisation to the internal code responsible for searching for treeview nodes. +[Issue #4574](https://redmine.postgresql.org/issues/4574) - Display the row count in the popup message when counting table rows, not just in the properties list. +[Issue #4612](https://redmine.postgresql.org/issues/4612) - Add support in query history to show internal queries generated by pgAdmin during save data operations. +[Issue #4667](https://redmine.postgresql.org/issues/4667) - Ensure editable and read-only columns in Query Tool should be identified by icons and tooltips in the column header. ## Housekeeping -[Issue \#4472](https://redmine.postgresql.org/issues/4472) - Add Reverse Engineered and Modified SQL tests for Synonyms. -[Issue \#4546](https://redmine.postgresql.org/issues/4546) - Add Reverse Engineered SQL tests for Columns. -[Issue \#4554](https://redmine.postgresql.org/issues/4554) - Add Reverse Engineered SQL tests for Trigger Functions. -[Issue \#4555](https://redmine.postgresql.org/issues/4555) - Add Reverse Engineered SQL tests for Exclusion Constraint. -[Issue \#4560](https://redmine.postgresql.org/issues/4560) - Add a --modules option to the RE-SQL test suite to allow testing of specific object types. -[Issue \#4575](https://redmine.postgresql.org/issues/4575) - Add Reverse Engineered SQL tests for Schemas. -[Issue \#4576](https://redmine.postgresql.org/issues/4576) - Add Reverse Engineered SQL tests for Views. -[Issue \#4600](https://redmine.postgresql.org/issues/4600) - Add Reverse Engineered SQL tests for Rules. -[Issue \#4616](https://redmine.postgresql.org/issues/4616) - Add Reverse Engineered and Modified SQL tests for Foreign Keys. -[Issue \#4617](https://redmine.postgresql.org/issues/4617) - Add Reverse Engineered and Modified SQL tests for Foreign Servers. -[Issue \#4618](https://redmine.postgresql.org/issues/4618) - Add Reverse Engineered and Modified SQL tests for Foreign Tables. 
-[Issue \#4619](https://redmine.postgresql.org/issues/4619) - Add Reverse Engineered and Modified SQL tests for FTS Templates. -[Issue \#4621](https://redmine.postgresql.org/issues/4621) - Add Reverse Engineered and Modified SQL tests for Indexes. -[Issue \#4624](https://redmine.postgresql.org/issues/4624) - Add Reverse Engineered and Modified SQL tests for Primary Keys. -[Issue \#4627](https://redmine.postgresql.org/issues/4627) - Add Reverse Engineered and Modified SQL tests for User Mappings. -[Issue \#4628](https://redmine.postgresql.org/issues/4628) - Add Reverse Engineered and Modified SQL tests for Unique Constraints. -[Issue \#4690](https://redmine.postgresql.org/issues/4690) - Add Modified SQL tests for Resource Group. +[Issue #4472](https://redmine.postgresql.org/issues/4472) - Add Reverse Engineered and Modified SQL tests for Synonyms. +[Issue #4546](https://redmine.postgresql.org/issues/4546) - Add Reverse Engineered SQL tests for Columns. +[Issue #4554](https://redmine.postgresql.org/issues/4554) - Add Reverse Engineered SQL tests for Trigger Functions. +[Issue #4555](https://redmine.postgresql.org/issues/4555) - Add Reverse Engineered SQL tests for Exclusion Constraint. +[Issue #4560](https://redmine.postgresql.org/issues/4560) - Add a --modules option to the RE-SQL test suite to allow testing of specific object types. +[Issue #4575](https://redmine.postgresql.org/issues/4575) - Add Reverse Engineered SQL tests for Schemas. +[Issue #4576](https://redmine.postgresql.org/issues/4576) - Add Reverse Engineered SQL tests for Views. +[Issue #4600](https://redmine.postgresql.org/issues/4600) - Add Reverse Engineered SQL tests for Rules. +[Issue #4616](https://redmine.postgresql.org/issues/4616) - Add Reverse Engineered and Modified SQL tests for Foreign Keys. +[Issue #4617](https://redmine.postgresql.org/issues/4617) - Add Reverse Engineered and Modified SQL tests for Foreign Servers. 
+[Issue #4618](https://redmine.postgresql.org/issues/4618) - Add Reverse Engineered and Modified SQL tests for Foreign Tables. +[Issue #4619](https://redmine.postgresql.org/issues/4619) - Add Reverse Engineered and Modified SQL tests for FTS Templates. +[Issue #4621](https://redmine.postgresql.org/issues/4621) - Add Reverse Engineered and Modified SQL tests for Indexes. +[Issue #4624](https://redmine.postgresql.org/issues/4624) - Add Reverse Engineered and Modified SQL tests for Primary Keys. +[Issue #4627](https://redmine.postgresql.org/issues/4627) - Add Reverse Engineered and Modified SQL tests for User Mappings. +[Issue #4628](https://redmine.postgresql.org/issues/4628) - Add Reverse Engineered and Modified SQL tests for Unique Constraints. +[Issue #4690](https://redmine.postgresql.org/issues/4690) - Add Modified SQL tests for Resource Group. ## Bug fixes -Issue \#PEM-706 - Changed the label from "Blocked Users" to "Blocked Sessions" on User Activity chart. (661653) -Issue \#PEM-2492 - Ensure parameter values are quoted when needed when editing roles. (876762) -Issue \#PEM-2581 - Error when changing kind(SQL/BATCH) for pgAgent job step. (893794) -Issue \#PEM-2727 - Upgrading SNMP to fix the agent crash issue (913881) -[Issue \#2706](https://redmine.postgresql.org/issues/2706) - Added ProjectSet icon for explain module. -[Issue \#2828](https://redmine.postgresql.org/issues/2828) - Added Gather Merge, Named Tuple Store Scan and Table Function Scan icon for explain module. -[Issue \#3605](https://redmine.postgresql.org/issues/3605) - Fix issue where Deleting N number of rows makes first N number of rows disable. -[Issue \#3778](https://redmine.postgresql.org/issues/3778) - Ensure Boolean columns should be editable using keyboard keys. -[Issue \#3936](https://redmine.postgresql.org/issues/3936) - Further code refactoring to stabilise the Feature Tests. 
-[Issue \#4179](https://redmine.postgresql.org/issues/4179) - Fix generation of reverse engineered SQL for tables with Greenplum 5.x. -[Issue \#4199](https://redmine.postgresql.org/issues/4199) - Ensure that 'ENTER' key in the data filter should not run the query. -[Issue \#4229](https://redmine.postgresql.org/issues/4229) - Update wcDocker to allow the browser's context menu to be used except in tab strips and panel headers. -[Issue \#4381](https://redmine.postgresql.org/issues/4381) - Fix an issue where oid column should not be pasted when copy/paste row is used on query output containing the oid column. -[Issue \#4401](https://redmine.postgresql.org/issues/4401) - Ensure type names are properly encoded in the results grid. -[Issue \#4408](https://redmine.postgresql.org/issues/4408) - Fix display of validation error message in SlickGrid cells. -[Issue \#4412](https://redmine.postgresql.org/issues/4412) - Fix issue where Validated switch option is inverted for the Foreign Key. -[Issue \#4414](https://redmine.postgresql.org/issues/4414) - Fix generation of reverse engineered SQL for partition table, partitions were shown as a child of indexes. -[Issue \#4419](https://redmine.postgresql.org/issues/4419) - Fix a debugger error when using Python 2.7. -[Issue \#4429](https://redmine.postgresql.org/issues/4429) - Ensure drag/drop from the treeview works as expected on Firefox. -[Issue \#4461](https://redmine.postgresql.org/issues/4461) - Fix error while importing data to a table using Import/Export dialog and providing "Not null columns" option. -[Issue \#4486](https://redmine.postgresql.org/issues/4486) - Ensure View should be created with special characters. -[Issue \#4487](https://redmine.postgresql.org/issues/4487) - Ensure Boolean columns should be editable in View/Edit data and Query Tool. -[Issue \#4489](https://redmine.postgresql.org/issues/4489) - Update wcDocker to prevent window state loading creating blank dialogues. 
-[Issue \#4490](https://redmine.postgresql.org/issues/4490) - Fix accessibility issue for checkbox in IE11. -[Issue \#4492](https://redmine.postgresql.org/issues/4492) - Ensure the Query Tool doesn't throw an error when viewing the contents of a table with no columns. -[Issue \#4496](https://redmine.postgresql.org/issues/4496) - Ensure columns can be created when they are IDENTITY fields with the CYCLE option enabled. -[Issue \#4497](https://redmine.postgresql.org/issues/4497) - Ensure purely numeric comments can be saved on new columns. -[Issue \#4508](https://redmine.postgresql.org/issues/4508) - Fix accessibility issue for Datetime cell in backgrid. -[Issue \#4520](https://redmine.postgresql.org/issues/4520) - Ensure the query tool will work with older versions of psycopg2 than we officially support, albeit without updatable resultsets. -[Issue \#4525](https://redmine.postgresql.org/issues/4525) - Ensure command tags are shown in the messages tab of the Query Tool. -[Issue \#4536](https://redmine.postgresql.org/issues/4536) - Fix load on demand in View/Edit data mode. -[Issue \#4552](https://redmine.postgresql.org/issues/4552) - Fix some errors thrown on the JS console when dragging text in the Query Tool. -[Issue \#4559](https://redmine.postgresql.org/issues/4559) - Ensure triggers should be updated properly for EPAS server. -[Issue \#4565](https://redmine.postgresql.org/issues/4565) - Fix the reverse engineered SQL for trigger functions with the WINDOW option selected. -[Issue \#4577](https://redmine.postgresql.org/issues/4577) - Fix an error that could be seen when click on any system column of a table. -[Issue \#4578](https://redmine.postgresql.org/issues/4578) - Ensure enable trigger menu should be visible when trigger is disabled. -[Issue \#4581](https://redmine.postgresql.org/issues/4581) - Ensure the comment on a Primary Key constraint can be edited under the Table node. 
-[Issue \#4582](https://redmine.postgresql.org/issues/4582) - Fix console error when changing kind(SQL/BATCH) for pgAgent job step. -[Issue \#4584](https://redmine.postgresql.org/issues/4584) - Unescape HTML entities in database names in the Query Tool title bar. -[Issue \#4585](https://redmine.postgresql.org/issues/4585) - Fix double click issue to expand the contents of a cell if the resultset was not editable. -[Issue \#4586](https://redmine.postgresql.org/issues/4586) - Fix generation of reverse engineered SQL for Rules. -[Issue \#4631](https://redmine.postgresql.org/issues/4631) - Add editor options for plain text mode and to disable block folding to workaround rendering speed issues in CodeMirror with very large scripts. -[Issue \#4635](https://redmine.postgresql.org/issues/4635) - Ensure compound triggers for event should be updated properly. -[Issue \#4638](https://redmine.postgresql.org/issues/4638) - Ensure compound triggers should be displayed under Views. -[Issue \#4641](https://redmine.postgresql.org/issues/4641) - Ensure Truncate option should be available for Compound Triggers. -[Issue \#4643](https://redmine.postgresql.org/issues/4643) - Fix Truncate option deselect issue for compound triggers. -[Issue \#4644](https://redmine.postgresql.org/issues/4644) - Fix length and precision enable/disable issue when changing the data type for Domain node. -[Issue \#4650](https://redmine.postgresql.org/issues/4650) - Fix SQL tab issue for Views. It's a regression of compound triggers. -[Issue \#4663](https://redmine.postgresql.org/issues/4663) - Fix exception in query history for python 2.7. -[Issue \#4674](https://redmine.postgresql.org/issues/4674) - Fix query tool launch error if user name contain html characters. -[Issue \#4681](https://redmine.postgresql.org/issues/4681) - Increase cache control max age for static files to improve performance over longer run. 
-[Issue \#4698](https://redmine.postgresql.org/issues/4698) - Fix SQL issue of length and precision when changing the data type of Column. -[Issue \#4702](https://redmine.postgresql.org/issues/4702) - Fix modified SQL for Index when reset the value of Fill factor and Clustered?. -[Issue \#4703](https://redmine.postgresql.org/issues/4703) - Fix reversed engineered SQL for btree Index when provided sort order and NULLs. -[Issue \#4726](https://redmine.postgresql.org/issues/4726) - Ensure sequence with negative value should be created. -[Issue \#4727](https://redmine.postgresql.org/issues/4727) - Fix issue where EXEC script doesn't write the complete script for Procedures. -[Issue \#4742](https://redmine.postgresql.org/issues/4742) - Ensure Primary Key should be created with Index. -[Issue \#4750](https://redmine.postgresql.org/issues/4750) - Fix query history exception for Python 3.6. -[Issue \#4756](https://redmine.postgresql.org/issues/4756) - Fix issue where PEM/pgAdmin does not load completely if loaded in an iframe. -[Issue \#4777](https://redmine.postgresql.org/issues/4777) - Fix issue where query history is not visible in the query history tab. +Issue #PEM-706 - Changed the label from "Blocked Users" to "Blocked Sessions" on User Activity chart. (661653) +Issue #PEM-2492 - Ensure parameter values are quoted when needed when editing roles. (876762) +Issue #PEM-2581 - Error when changing kind(SQL/BATCH) for pgAgent job step. (893794) +Issue #PEM-2727 - Upgrading SNMP to fix the agent crash issue (913881) +[Issue #2706](https://redmine.postgresql.org/issues/2706) - Added ProjectSet icon for explain module. +[Issue #2828](https://redmine.postgresql.org/issues/2828) - Added Gather Merge, Named Tuple Store Scan and Table Function Scan icon for explain module. +[Issue #3605](https://redmine.postgresql.org/issues/3605) - Fix issue where Deleting N number of rows makes first N number of rows disable. 
+[Issue #3778](https://redmine.postgresql.org/issues/3778) - Ensure Boolean columns should be editable using keyboard keys. +[Issue #3936](https://redmine.postgresql.org/issues/3936) - Further code refactoring to stabilise the Feature Tests. +[Issue #4179](https://redmine.postgresql.org/issues/4179) - Fix generation of reverse engineered SQL for tables with Greenplum 5.x. +[Issue #4199](https://redmine.postgresql.org/issues/4199) - Ensure that 'ENTER' key in the data filter should not run the query. +[Issue #4229](https://redmine.postgresql.org/issues/4229) - Update wcDocker to allow the browser's context menu to be used except in tab strips and panel headers. +[Issue #4381](https://redmine.postgresql.org/issues/4381) - Fix an issue where oid column should not be pasted when copy/paste row is used on query output containing the oid column. +[Issue #4401](https://redmine.postgresql.org/issues/4401) - Ensure type names are properly encoded in the results grid. +[Issue #4408](https://redmine.postgresql.org/issues/4408) - Fix display of validation error message in SlickGrid cells. +[Issue #4412](https://redmine.postgresql.org/issues/4412) - Fix issue where Validated switch option is inverted for the Foreign Key. +[Issue #4414](https://redmine.postgresql.org/issues/4414) - Fix generation of reverse engineered SQL for partition table, partitions were shown as a child of indexes. +[Issue #4419](https://redmine.postgresql.org/issues/4419) - Fix a debugger error when using Python 2.7. +[Issue #4429](https://redmine.postgresql.org/issues/4429) - Ensure drag/drop from the treeview works as expected on Firefox. +[Issue #4461](https://redmine.postgresql.org/issues/4461) - Fix error while importing data to a table using Import/Export dialog and providing "Not null columns" option. +[Issue #4486](https://redmine.postgresql.org/issues/4486) - Ensure View should be created with special characters. 
+[Issue #4487](https://redmine.postgresql.org/issues/4487) - Ensure Boolean columns should be editable in View/Edit data and Query Tool. +[Issue #4489](https://redmine.postgresql.org/issues/4489) - Update wcDocker to prevent window state loading creating blank dialogues. +[Issue #4490](https://redmine.postgresql.org/issues/4490) - Fix accessibility issue for checkbox in IE11. +[Issue #4492](https://redmine.postgresql.org/issues/4492) - Ensure the Query Tool doesn't throw an error when viewing the contents of a table with no columns. +[Issue #4496](https://redmine.postgresql.org/issues/4496) - Ensure columns can be created when they are IDENTITY fields with the CYCLE option enabled. +[Issue #4497](https://redmine.postgresql.org/issues/4497) - Ensure purely numeric comments can be saved on new columns. +[Issue #4508](https://redmine.postgresql.org/issues/4508) - Fix accessibility issue for Datetime cell in backgrid. +[Issue #4520](https://redmine.postgresql.org/issues/4520) - Ensure the query tool will work with older versions of psycopg2 than we officially support, albeit without updatable resultsets. +[Issue #4525](https://redmine.postgresql.org/issues/4525) - Ensure command tags are shown in the messages tab of the Query Tool. +[Issue #4536](https://redmine.postgresql.org/issues/4536) - Fix load on demand in View/Edit data mode. +[Issue #4552](https://redmine.postgresql.org/issues/4552) - Fix some errors thrown on the JS console when dragging text in the Query Tool. +[Issue #4559](https://redmine.postgresql.org/issues/4559) - Ensure triggers should be updated properly for EPAS server. +[Issue #4565](https://redmine.postgresql.org/issues/4565) - Fix the reverse engineered SQL for trigger functions with the WINDOW option selected. +[Issue #4577](https://redmine.postgresql.org/issues/4577) - Fix an error that could be seen when click on any system column of a table. 
+[Issue #4578](https://redmine.postgresql.org/issues/4578) - Ensure enable trigger menu should be visible when trigger is disabled. +[Issue #4581](https://redmine.postgresql.org/issues/4581) - Ensure the comment on a Primary Key constraint can be edited under the Table node. +[Issue #4582](https://redmine.postgresql.org/issues/4582) - Fix console error when changing kind(SQL/BATCH) for pgAgent job step. +[Issue #4584](https://redmine.postgresql.org/issues/4584) - Unescape HTML entities in database names in the Query Tool title bar. +[Issue #4585](https://redmine.postgresql.org/issues/4585) - Fix double click issue to expand the contents of a cell if the resultset was not editable. +[Issue #4586](https://redmine.postgresql.org/issues/4586) - Fix generation of reverse engineered SQL for Rules. +[Issue #4631](https://redmine.postgresql.org/issues/4631) - Add editor options for plain text mode and to disable block folding to workaround rendering speed issues in CodeMirror with very large scripts. +[Issue #4635](https://redmine.postgresql.org/issues/4635) - Ensure compound triggers for event should be updated properly. +[Issue #4638](https://redmine.postgresql.org/issues/4638) - Ensure compound triggers should be displayed under Views. +[Issue #4641](https://redmine.postgresql.org/issues/4641) - Ensure Truncate option should be available for Compound Triggers. +[Issue #4643](https://redmine.postgresql.org/issues/4643) - Fix Truncate option deselect issue for compound triggers. +[Issue #4644](https://redmine.postgresql.org/issues/4644) - Fix length and precision enable/disable issue when changing the data type for Domain node. +[Issue #4650](https://redmine.postgresql.org/issues/4650) - Fix SQL tab issue for Views. It's a regression of compound triggers. +[Issue #4663](https://redmine.postgresql.org/issues/4663) - Fix exception in query history for python 2.7. 
+[Issue #4674](https://redmine.postgresql.org/issues/4674) - Fix query tool launch error if user name contain html characters. +[Issue #4681](https://redmine.postgresql.org/issues/4681) - Increase cache control max age for static files to improve performance over longer run. +[Issue #4698](https://redmine.postgresql.org/issues/4698) - Fix SQL issue of length and precision when changing the data type of Column. +[Issue #4702](https://redmine.postgresql.org/issues/4702) - Fix modified SQL for Index when reset the value of Fill factor and Clustered?. +[Issue #4703](https://redmine.postgresql.org/issues/4703) - Fix reversed engineered SQL for btree Index when provided sort order and NULLs. +[Issue #4726](https://redmine.postgresql.org/issues/4726) - Ensure sequence with negative value should be created. +[Issue #4727](https://redmine.postgresql.org/issues/4727) - Fix issue where EXEC script doesn't write the complete script for Procedures. +[Issue #4742](https://redmine.postgresql.org/issues/4742) - Ensure Primary Key should be created with Index. +[Issue #4750](https://redmine.postgresql.org/issues/4750) - Fix query history exception for Python 3.6. +[Issue #4756](https://redmine.postgresql.org/issues/4756) - Fix issue where PEM/pgAdmin does not load completely if loaded in an iframe. +[Issue #4777](https://redmine.postgresql.org/issues/4777) - Fix issue where query history is not visible in the query history tab. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/10_pem_release_notes_7_9.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/10_pem_release_notes_7_9.mdx index 167cb2d6029..c61d78b39a3 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/10_pem_release_notes_7_9.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/10_pem_release_notes_7_9.mdx @@ -2,83 +2,81 @@ title: "PEM v7.9" --- - - Release date: 2019-07-31 ## Features -Issue \#PEM-2525 - Send SMTP notification on the completion of a scheduled task -Issue \#PEM-2526 - Schedule a SHELL/BATCH script, SQL based jobs at agent level -Issue \#PEM-2048 - Allow to configure pem-server without disabling the selinux on RHLE/CentOS (883342) -[Issue \#1760](https://redmine.postgresql.org/issues/1760) - Add support for editing of resultsets in the Query Tool, if the data can be identified as updatable. -[Issue \#2653](https://redmine.postgresql.org/issues/2653) - Allow the UI layout to be fully locked or to prevent docking changes. -[Issue \#3174](https://redmine.postgresql.org/issues/3174) - Visually distinguish simple tables from tables that are inherited and from which other tables are inherited. -[Issue \#4283](https://redmine.postgresql.org/issues/4283) - Initial support for PostgreSQL 12. -[Issue \#4288](https://redmine.postgresql.org/issues/4288) - Initial support for PostgreSQL 12. -[Issue \#4290](https://redmine.postgresql.org/issues/4290) - Initial support for PostgreSQL 12. -[Issue \#4318](https://redmine.postgresql.org/issues/4318) - Set the mouse cursor appropriately based on the layout lock state. -[Issue \#4335](https://redmine.postgresql.org/issues/4335) - Add EXPLAIN options for SETTINGS and SUMMARY. 
+Issue #PEM-2525 - Send SMTP notification on the completion of a scheduled task +Issue #PEM-2526 - Schedule a SHELL/BATCH script, SQL based jobs at agent level +Issue #PEM-2048 - Allow to configure pem-server without disabling the selinux on RHLE/CentOS (883342) +[Issue #1760](https://redmine.postgresql.org/issues/1760) - Add support for editing of resultsets in the Query Tool, if the data can be identified as updatable. +[Issue #2653](https://redmine.postgresql.org/issues/2653) - Allow the UI layout to be fully locked or to prevent docking changes. +[Issue #3174](https://redmine.postgresql.org/issues/3174) - Visually distinguish simple tables from tables that are inherited and from which other tables are inherited. +[Issue #4283](https://redmine.postgresql.org/issues/4283) - Initial support for PostgreSQL 12. +[Issue #4288](https://redmine.postgresql.org/issues/4288) - Initial support for PostgreSQL 12. +[Issue #4290](https://redmine.postgresql.org/issues/4290) - Initial support for PostgreSQL 12. +[Issue #4318](https://redmine.postgresql.org/issues/4318) - Set the mouse cursor appropriately based on the layout lock state. +[Issue #4335](https://redmine.postgresql.org/issues/4335) - Add EXPLAIN options for SETTINGS and SUMMARY. ## Housekeeping -[Issue \#4202](https://redmine.postgresql.org/issues/4202) - Add a framework for testing reversed engineered SQL and CRUD API endpoints. -[Issue \#4415](https://redmine.postgresql.org/issues/4415) - Add Reverse Engineered SQL tests for Roles and Resource Groups. -[Issue \#4441](https://redmine.postgresql.org/issues/4441) - Add Reverse Engineered SQL tests for FDWs. -[Issue \#4450](https://redmine.postgresql.org/issues/4450) - Fix reverse engineered sql for Foreign Data Wrapper created on EPAS server in redwood mode. -[Issue \#4452](https://redmine.postgresql.org/issues/4452) - Add Reverse Engineered SQL tests for Languages. 
-[Issue \#4453](https://redmine.postgresql.org/issues/4453) - Add Reverse Engineered SQL tests for Extensions. -[Issue \#4454](https://redmine.postgresql.org/issues/4454) - Add Reverse Engineered SQL tests for FTS Configurations. -[Issue \#4456](https://redmine.postgresql.org/issues/4456) - Add Reverse Engineered SQL tests for Packages. -[Issue \#4460](https://redmine.postgresql.org/issues/4460) - Add Reverse Engineered SQL tests for FTS Dictionaries. -[Issue \#4463](https://redmine.postgresql.org/issues/4463) - Add Reverse Engineered SQL tests for Domains. -[Issue \#4464](https://redmine.postgresql.org/issues/4464) - Add Reverse Engineered SQL tests for Collations. -[Issue \#4468](https://redmine.postgresql.org/issues/4468) - Add Reverse Engineered SQL tests for Types. -[Issue \#4469](https://redmine.postgresql.org/issues/4469) - Add Reverse Engineered SQL tests for Sequences. -[Issue \#4471](https://redmine.postgresql.org/issues/4471) - Add Reverse Engineered SQL tests for FTS Parsers. +[Issue #4202](https://redmine.postgresql.org/issues/4202) - Add a framework for testing reversed engineered SQL and CRUD API endpoints. +[Issue #4415](https://redmine.postgresql.org/issues/4415) - Add Reverse Engineered SQL tests for Roles and Resource Groups. +[Issue #4441](https://redmine.postgresql.org/issues/4441) - Add Reverse Engineered SQL tests for FDWs. +[Issue #4450](https://redmine.postgresql.org/issues/4450) - Fix reverse engineered sql for Foreign Data Wrapper created on EPAS server in redwood mode. +[Issue #4452](https://redmine.postgresql.org/issues/4452) - Add Reverse Engineered SQL tests for Languages. +[Issue #4453](https://redmine.postgresql.org/issues/4453) - Add Reverse Engineered SQL tests for Extensions. +[Issue #4454](https://redmine.postgresql.org/issues/4454) - Add Reverse Engineered SQL tests for FTS Configurations. +[Issue #4456](https://redmine.postgresql.org/issues/4456) - Add Reverse Engineered SQL tests for Packages. 
+[Issue #4460](https://redmine.postgresql.org/issues/4460) - Add Reverse Engineered SQL tests for FTS Dictionaries. +[Issue #4463](https://redmine.postgresql.org/issues/4463) - Add Reverse Engineered SQL tests for Domains. +[Issue #4464](https://redmine.postgresql.org/issues/4464) - Add Reverse Engineered SQL tests for Collations. +[Issue #4468](https://redmine.postgresql.org/issues/4468) - Add Reverse Engineered SQL tests for Types. +[Issue #4469](https://redmine.postgresql.org/issues/4469) - Add Reverse Engineered SQL tests for Sequences. +[Issue #4471](https://redmine.postgresql.org/issues/4471) - Add Reverse Engineered SQL tests for FTS Parsers. ## Bug fixes -Issue \#PEM-2459 - Tuning wizard shows wrong original value for 'max\_wal\_size' parameter \[Support Ticket \#880186\] -Issue \#PEM-2499 - Index Advisor is not suggesting index on PEM UI. \[Support Ticket \#891318\] -Issue \#PEM-2503 - Issue while scheduling the pgAgent job on PEM UI. \[Support Ticket \#891220\] -[Issue \#3919](https://redmine.postgresql.org/issues/3919) - Allow keyboard navigation of all controls on subnode grids. -[Issue \#3994](https://redmine.postgresql.org/issues/3994) - Fix issue where the dependencies tab for inherited tables/foreign keys shows partial text. -[Issue \#3996](https://redmine.postgresql.org/issues/3996) - Fix dropping of pgAgent schedules through the Job properties. -[Issue \#4036](https://redmine.postgresql.org/issues/4036) - Allow editing of data where a primary key column includes a % sign in the value. -[Issue \#4162](https://redmine.postgresql.org/issues/4162) - Fix syntax error when adding more than one column to the existing table. -[Issue \#4169](https://redmine.postgresql.org/issues/4169) - Omit the geometry viewer in the Query Tool from layout saving. -[Issue \#4171](https://redmine.postgresql.org/issues/4171) - Fix issue where reverse engineered SQL was failing for foreign tables, if it had "=" in the options. 
-[Issue \#4195](https://redmine.postgresql.org/issues/4195) - Fix keyboard navigation in "inner" tabsets such as the Query Tool and Debugger. -[Issue \#4224](https://redmine.postgresql.org/issues/4224) - Prevent flickering of large tooltips on the Graphical EXPLAIN canvas. -[Issue \#4228](https://redmine.postgresql.org/issues/4228) - Ensure the correct label is used in panel headers when viewing filtered rows. -[Issue \#4253](https://redmine.postgresql.org/issues/4253) - Fix issue where new column should be created with Default value. -[Issue \#4255](https://redmine.postgresql.org/issues/4255) - Prevent the geometry viewer grabbing key presses when not in focus under Firefox, IE and Edge. -[Issue \#4284](https://redmine.postgresql.org/issues/4284) - Fix syntax error when creating a table with a serial column. -[Issue \#4320](https://redmine.postgresql.org/issues/4320) - Fix issue where SSH tunnel connection using password is failing, it's regression of Master Password. -[Issue \#4329](https://redmine.postgresql.org/issues/4329) - Fix an initialisation error when two functions with parameters are debugged in parallel. -[Issue \#4343](https://redmine.postgresql.org/issues/4343) - Fix issue where property dialog of column should open properly for EPAS v12. -[Issue \#4345](https://redmine.postgresql.org/issues/4345) - Capitalize the word 'export' used in Import/Export module. -[Issue \#4349](https://redmine.postgresql.org/issues/4349) - Ensure strings are properly encoded in the Query History. -[Issue \#4350](https://redmine.postgresql.org/issues/4350) - Ensure we include the CSRF token when uploading files. -[Issue \#4360](https://redmine.postgresql.org/issues/4360) - Ensure the debugger control buttons are only enabled once initialisation is complete. -[Issue \#4362](https://redmine.postgresql.org/issues/4362) - Remove additional "SETOF" included when generating CREATE scripts for trigger functions. 
-[Issue \#4365](https://redmine.postgresql.org/issues/4365) - Fix help links for backup globals and backup server. -[Issue \#4367](https://redmine.postgresql.org/issues/4367) - Fix an XSS issue seen in View/Edit data mode if a column name includes HTML. -[Issue \#4378](https://redmine.postgresql.org/issues/4378) - Ensure Python escaping matched JS escaping and fix a minor XSS issue in the Query Tool that required superuser access to trigger. -[Issue \#4380](https://redmine.postgresql.org/issues/4380) - Ensure that both columns and partitions can be edited at the same time in the table dialog. -[Issue \#4386](https://redmine.postgresql.org/issues/4386) - Fix an XSS issue when username contains XSS vulnerable text. -[Issue \#4389](https://redmine.postgresql.org/issues/4389) - Fix an error that could be seen when editing column privileges. -[Issue \#4393](https://redmine.postgresql.org/issues/4393) - Ensure parameter values are quoted when needed when editing roles. -[Issue \#4403](https://redmine.postgresql.org/issues/4403) - Ensure the browser close confirmation is only shown when closing a Query Tool which is running in a separate browser tab. -[Issue \#4404](https://redmine.postgresql.org/issues/4404) - Prevent an error that may occur when editing data with an integer primary key. -[Issue \#4407](https://redmine.postgresql.org/issues/4407) - Fix a quoting issue that caused a blank UI to be displayed when running in French. -[Issue \#4427](https://redmine.postgresql.org/issues/4427) - Fix an error while retrieving json data from the table. -[Issue \#4428](https://redmine.postgresql.org/issues/4428) - Fix 'malformed array literal' error when updating a pgAgent job. -[Issue \#4437](https://redmine.postgresql.org/issues/4437) - Fix table icon issue when updating any existing field. -[Issue \#4446](https://redmine.postgresql.org/issues/4446) - Use ROLE consistently when generating RE-SQL for roles, not USER. 
-[Issue \#4448](https://redmine.postgresql.org/issues/4448) - Fix an error seen when updating a connection string in a pgAgent job step. -[Issue \#4462](https://redmine.postgresql.org/issues/4462) - Fix some minor UI issues on IE11. -[Issue \#4470](https://redmine.postgresql.org/issues/4470) - Fix sequence reverse engineered SQL generation with quoted names on PG/EPAS 10+. -[Issue \#4484](https://redmine.postgresql.org/issues/4484) - Fix an issue where Explain and Explain Analyze are not working, it's regression of \#1760. -[Issue \#4485](https://redmine.postgresql.org/issues/4485) - Fix an issue where Filter toolbar button is not working in view/edit data, it's regression of keyboard navigation. +Issue #PEM-2459 - Tuning wizard shows wrong original value for 'max_wal_size' parameter \[Support Ticket #880186] +Issue #PEM-2499 - Index Advisor is not suggesting index on PEM UI. \[Support Ticket #891318] +Issue #PEM-2503 - Issue while scheduling the pgAgent job on PEM UI. \[Support Ticket #891220] +[Issue #3919](https://redmine.postgresql.org/issues/3919) - Allow keyboard navigation of all controls on subnode grids. +[Issue #3994](https://redmine.postgresql.org/issues/3994) - Fix issue where the dependencies tab for inherited tables/foreign keys shows partial text. +[Issue #3996](https://redmine.postgresql.org/issues/3996) - Fix dropping of pgAgent schedules through the Job properties. +[Issue #4036](https://redmine.postgresql.org/issues/4036) - Allow editing of data where a primary key column includes a % sign in the value. +[Issue #4162](https://redmine.postgresql.org/issues/4162) - Fix syntax error when adding more than one column to the existing table. +[Issue #4169](https://redmine.postgresql.org/issues/4169) - Omit the geometry viewer in the Query Tool from layout saving. +[Issue #4171](https://redmine.postgresql.org/issues/4171) - Fix issue where reverse engineered SQL was failing for foreign tables, if it had "=" in the options. 
+[Issue #4195](https://redmine.postgresql.org/issues/4195) - Fix keyboard navigation in "inner" tabsets such as the Query Tool and Debugger. +[Issue #4224](https://redmine.postgresql.org/issues/4224) - Prevent flickering of large tooltips on the Graphical EXPLAIN canvas. +[Issue #4228](https://redmine.postgresql.org/issues/4228) - Ensure the correct label is used in panel headers when viewing filtered rows. +[Issue #4253](https://redmine.postgresql.org/issues/4253) - Fix issue where new column should be created with Default value. +[Issue #4255](https://redmine.postgresql.org/issues/4255) - Prevent the geometry viewer grabbing key presses when not in focus under Firefox, IE and Edge. +[Issue #4284](https://redmine.postgresql.org/issues/4284) - Fix syntax error when creating a table with a serial column. +[Issue #4320](https://redmine.postgresql.org/issues/4320) - Fix issue where SSH tunnel connection using password is failing, it's regression of Master Password. +[Issue #4329](https://redmine.postgresql.org/issues/4329) - Fix an initialisation error when two functions with parameters are debugged in parallel. +[Issue #4343](https://redmine.postgresql.org/issues/4343) - Fix issue where property dialog of column should open properly for EPAS v12. +[Issue #4345](https://redmine.postgresql.org/issues/4345) - Capitalize the word 'export' used in Import/Export module. +[Issue #4349](https://redmine.postgresql.org/issues/4349) - Ensure strings are properly encoded in the Query History. +[Issue #4350](https://redmine.postgresql.org/issues/4350) - Ensure we include the CSRF token when uploading files. +[Issue #4360](https://redmine.postgresql.org/issues/4360) - Ensure the debugger control buttons are only enabled once initialisation is complete. +[Issue #4362](https://redmine.postgresql.org/issues/4362) - Remove additional "SETOF" included when generating CREATE scripts for trigger functions. 
+[Issue #4365](https://redmine.postgresql.org/issues/4365) - Fix help links for backup globals and backup server. +[Issue #4367](https://redmine.postgresql.org/issues/4367) - Fix an XSS issue seen in View/Edit data mode if a column name includes HTML. +[Issue #4378](https://redmine.postgresql.org/issues/4378) - Ensure Python escaping matched JS escaping and fix a minor XSS issue in the Query Tool that required superuser access to trigger. +[Issue #4380](https://redmine.postgresql.org/issues/4380) - Ensure that both columns and partitions can be edited at the same time in the table dialog. +[Issue #4386](https://redmine.postgresql.org/issues/4386) - Fix an XSS issue when username contains XSS vulnerable text. +[Issue #4389](https://redmine.postgresql.org/issues/4389) - Fix an error that could be seen when editing column privileges. +[Issue #4393](https://redmine.postgresql.org/issues/4393) - Ensure parameter values are quoted when needed when editing roles. +[Issue #4403](https://redmine.postgresql.org/issues/4403) - Ensure the browser close confirmation is only shown when closing a Query Tool which is running in a separate browser tab. +[Issue #4404](https://redmine.postgresql.org/issues/4404) - Prevent an error that may occur when editing data with an integer primary key. +[Issue #4407](https://redmine.postgresql.org/issues/4407) - Fix a quoting issue that caused a blank UI to be displayed when running in French. +[Issue #4427](https://redmine.postgresql.org/issues/4427) - Fix an error while retrieving json data from the table. +[Issue #4428](https://redmine.postgresql.org/issues/4428) - Fix 'malformed array literal' error when updating a pgAgent job. +[Issue #4437](https://redmine.postgresql.org/issues/4437) - Fix table icon issue when updating any existing field. +[Issue #4446](https://redmine.postgresql.org/issues/4446) - Use ROLE consistently when generating RE-SQL for roles, not USER. 
+[Issue #4448](https://redmine.postgresql.org/issues/4448) - Fix an error seen when updating a connection string in a pgAgent job step. +[Issue #4462](https://redmine.postgresql.org/issues/4462) - Fix some minor UI issues on IE11. +[Issue #4470](https://redmine.postgresql.org/issues/4470) - Fix sequence reverse engineered SQL generation with quoted names on PG/EPAS 10+. +[Issue #4484](https://redmine.postgresql.org/issues/4484) - Fix an issue where Explain and Explain Analyze are not working, it's regression of #1760. +[Issue #4485](https://redmine.postgresql.org/issues/4485) - Fix an issue where Filter toolbar button is not working in view/edit data, it's regression of keyboard navigation. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/11_pem_release_notes_7_8.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/11_pem_release_notes_7_8.mdx index 89f9ebbd445..121fe3814f4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/11_pem_release_notes_7_8.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/11_pem_release_notes_7_8.mdx @@ -2,78 +2,76 @@ title: "PEM v7.8" --- - - Release date: 2019-06-05 ## Features -Feature \#PEM-614 - Added --enable-heartbeat-connection parameter to use dedicated heartbeat connection while self registring pemagent. \[Support Ticket \#742120\] -[Feature \#4017](https://redmine.postgresql.org/issues/4017) - Make the Query Tool history persistent across sessions. -[Feature \#4018](https://redmine.postgresql.org/issues/4018) - Remove the large and unnecessary dependency on React and 87 other related libraries. -[Feature \#4030](https://redmine.postgresql.org/issues/4030) - Add support for IDENTITY columns. +Feature #PEM-614 - Added --enable-heartbeat-connection parameter to use dedicated heartbeat connection while self registring pemagent. 
\[Support Ticket #742120] +[Feature #4017](https://redmine.postgresql.org/issues/4017) - Make the Query Tool history persistent across sessions. +[Feature #4018](https://redmine.postgresql.org/issues/4018) - Remove the large and unnecessary dependency on React and 87 other related libraries. +[Feature #4030](https://redmine.postgresql.org/issues/4030) - Add support for IDENTITY columns. ## Bug fixes -Bug \#PEM-986 - Fetch the pem schema version again in pemagent when PEM server restarts. \[Support Ticket \#769569\] -Bug \#PEM-1449 - Copy alert REST API document is missing. \[Support Ticket \#804272\] -Bug \#PEM-2046 - Use the 'g' flag to replace all occurance of placeholder in the alert script code, alert email template, alert email subject. \[Support Ticket \#856746\] -Bug \#PEM-2087 - Line chart on Dashboard is not honouring the user's selected start & end timespan. \[Support Ticket \#859558\] -Bug \#PEM-2135 - No take over is happening after PEM agent and server upgrade from version 7.4 to 7.7. \[Support Ticket \#859340\] -Bug \#PEM-2166 - Table chart created using custom probe should render properly. \[Support Ticket \#872498\] -[Bug \#4217](https://redmine.postgresql.org/issues/4217)/PEM-2204 - Fixed CSRF security vulnerability issue. -[Bug \#1269](https://redmine.postgresql.org/issues/1269) - Fix naming inconsistency for the column and FTS parser modules. -[Bug \#2392](https://redmine.postgresql.org/issues/2392) - Ensure that on clicking Delete button should not delete rows immediately from the database server, it should be deleted when Save button will be clicked. -[Bug \#2627](https://redmine.postgresql.org/issues/2627) - Include inherited column comments and defaults in reverse engineered table SQL. -[Bug \#3582](https://redmine.postgresql.org/issues/3582) - Ensure that JSON strings as comments should be added properly for all the objects. 
-[Bug \#3605](https://redmine.postgresql.org/issues/3605) - Fix an issue where Deleting N number of rows makes first N number of rows disable. -[Bug \#3933](https://redmine.postgresql.org/issues/3933) - Ignore exceptions in the logger. -[Bug \#3938](https://redmine.postgresql.org/issues/3938) - Added support for Default Partition. -[Bug \#4019](https://redmine.postgresql.org/issues/4019) - Update all Python and JavaScript dependencies. -[Bug \#4037](https://redmine.postgresql.org/issues/4037) - Include comment SQL for inherited columns in reverse engineered table SQL. -[Bug \#4050](https://redmine.postgresql.org/issues/4050) - Make the WHEN field a CodeMirror control on the Event Trigger dialogue. -[Bug \#4052](https://redmine.postgresql.org/issues/4052) - Fix the online help button on the resource group dialogue. -[Bug \#4053](https://redmine.postgresql.org/issues/4053) - Enable the online help button on the index dialogue. -[Bug \#4062](https://redmine.postgresql.org/issues/4062) - Fix handling of numeric arrays in View/Edit Data. -[Bug \#4063](https://redmine.postgresql.org/issues/4063) - Enlarge the grab handles for resizing dialogs etc. -[Bug \#4069](https://redmine.postgresql.org/issues/4069) - Append the file suffix to filenames when needed in the File Create dialogue. -[Bug \#4073](https://redmine.postgresql.org/issues/4073) - Change the CodeMirror active line background colour to $color-danger-lighter so it doesn't conflict with the selection colour. -[Bug \#4081](https://redmine.postgresql.org/issues/4081) - Fix the RE-SQL syntax for roles with a VALID UNTIL clause. -[Bug \#4082](https://redmine.postgresql.org/issues/4082) - Prevent an empty error message being shown when "downloading" a CREATE script using the CSV download. -[Bug \#4084](https://redmine.postgresql.org/issues/4084) - Overhaul the layout saving code so it includes the Query Tool and Debugger, and stores the layout when change events are detected rather than (unreliably) on exit. 
-[Bug \#4085](https://redmine.postgresql.org/issues/4085) - Display errors during CSV download from the Query Tool in the UI rather than putting them in the CSV file. -[Bug \#4087](https://redmine.postgresql.org/issues/4087) - Fix an issue where 'GRANT UPDATE' sql should be displayed for default sequence privileges. -[Bug \#4096](https://redmine.postgresql.org/issues/4096) - Ensure the toolbar buttons are properly reset following a CSV download in the Query Tool. -[Bug \#4099](https://redmine.postgresql.org/issues/4099) - Fix SQL help for EPAS 10+, and refactor the URL generation code into a testable function. -[Bug \#4100](https://redmine.postgresql.org/issues/4100) - Ensure sequences can be created with increment, start, minimum and maximum options set. -[Bug \#4101](https://redmine.postgresql.org/issues/4101) - Ensure that confirmation dialog should be popped up before reload of query tool or debugger if it is opened in a new browser tab. -[Bug \#4104](https://redmine.postgresql.org/issues/4104) - Ensure that record should be add/edited for root partition table with primary keys. -[Bug \#4105](https://redmine.postgresql.org/issues/4105) - Fix an issue where JSON data would not be rendered in the Query Tool. -[Bug \#4109](https://redmine.postgresql.org/issues/4109) - Ensure View/Materialized View node should be visible after updating any property. -[Bug \#4110](https://redmine.postgresql.org/issues/4110) - Fix custom autovacuum configuration for Materialized Views. -[Bug \#4121](https://redmine.postgresql.org/issues/4121) - Fixed alignment issue of columns in definition section of Index node. -[Bug \#4131](https://redmine.postgresql.org/issues/4131) - Relabel the Save button on the datagrid text editor to avoid confusion with the actual Save button that updates the database. -[Bug \#4134](https://redmine.postgresql.org/issues/4134) - Fixed 'Location cannot be empty' error when open Tablespace properties. 
-[Bug \#4138](https://redmine.postgresql.org/issues/4138) - Fix an issue where the dropdown becomes misaligned/displaced. -[Bug \#4142](https://redmine.postgresql.org/issues/4142) - Added recommended ESLinter checks. -[Bug \#4143](https://redmine.postgresql.org/issues/4143) - Ensure that pgAdmin4 should work properly with psycopg2 v2.8 -[Bug \#4154](https://redmine.postgresql.org/issues/4154) - Ensure the treeview shows all sequences except those used to implement IDENTITY columns (which can be edited as part of the column). Show all if Show System Objects is enabled. -[Bug \#4160](https://redmine.postgresql.org/issues/4160) - Fixed 'Increment value cannot be empty' error for existing tables. -[Bug \#4161](https://redmine.postgresql.org/issues/4161) - Ensure that parameters of procedures for EPAS server 10 and below should be set/reset properly. -[Bug \#4163](https://redmine.postgresql.org/issues/4163) - Prevent duplicate columns being included in reverse engineered SQL for tables. -[Bug \#4164](https://redmine.postgresql.org/issues/4164) - Fix file browser path issue which occurs when client is on Windows and server is on Mac/Linux. -[Bug \#4182](https://redmine.postgresql.org/issues/4182) - Ensure sanity of the permissions on the storage and session directories and the config database. -[Bug \#4218](https://redmine.postgresql.org/issues/4218) - Properly assign dropdownParent in Select2 controls. -[Bug \#4219](https://redmine.postgresql.org/issues/4219) - Ensure popper.js is installed when needed. -[Bug \#4246](https://redmine.postgresql.org/issues/4246) - Fixed console error when subnode control is used in panels. -[Bug \#4194](https://redmine.postgresql.org/issues/4194) - Fix accessibility issue for menu navigation. -[Bug \#4227](https://redmine.postgresql.org/issues/4227) - Fixed Tab key navigation for Maintenance dialog. 
-[Bug \#4244](https://redmine.postgresql.org/issues/4244) - Fix Tab key issue for Toggle switch controls and button on the dialog footer in Safari browser. -[Bug \#4245](https://redmine.postgresql.org/issues/4245) - Ensure that element should get highlighted when they get focus on using Tab key. -[Bug \#4261](https://redmine.postgresql.org/issues/4261) - Stop using application/x-javascript as a mime type and use the RFC-compliant application/javascript instead. -[Bug \#4262](https://redmine.postgresql.org/issues/4262) - Fixed error on displaying table properties of a table partitioned by list having a default partition. -[Bug \#4263](https://redmine.postgresql.org/issues/4263) - Fix handling of JSON in the Query Tool with NULL elements. -[Bug \#4269](https://redmine.postgresql.org/issues/4269) - Fix navigation of switch cells in grids. -[Bug \#4276](https://redmine.postgresql.org/issues/4276) - Relax the permission check on the directory containing the config database, as it may fail in some environments such as OpenShift. -[Bug \#4278](https://redmine.postgresql.org/issues/4278) - Prevent Backgrid Password cells from losing focus if the browser opens an autocomplete list. -[Bug \#4308](https://redmine.postgresql.org/issues/4308) - Fix the issue of accessing the SQL for Views and Materialized Views. Regression of pluralisation of folder names. +Bug #PEM-986 - Fetch the pem schema version again in pemagent when PEM server restarts. \[Support Ticket #769569] +Bug #PEM-1449 - Copy alert REST API document is missing. \[Support Ticket #804272] +Bug #PEM-2046 - Use the 'g' flag to replace all occurance of placeholder in the alert script code, alert email template, alert email subject. \[Support Ticket #856746] +Bug #PEM-2087 - Line chart on Dashboard is not honouring the user's selected start & end timespan. \[Support Ticket #859558] +Bug #PEM-2135 - No take over is happening after PEM agent and server upgrade from version 7.4 to 7.7. 
\[Support Ticket #859340] +Bug #PEM-2166 - Table chart created using custom probe should render properly. \[Support Ticket #872498] +[Bug #4217](https://redmine.postgresql.org/issues/4217)/PEM-2204 - Fixed CSRF security vulnerability issue. +[Bug #1269](https://redmine.postgresql.org/issues/1269) - Fix naming inconsistency for the column and FTS parser modules. +[Bug #2392](https://redmine.postgresql.org/issues/2392) - Ensure that on clicking Delete button should not delete rows immediately from the database server, it should be deleted when Save button will be clicked. +[Bug #2627](https://redmine.postgresql.org/issues/2627) - Include inherited column comments and defaults in reverse engineered table SQL. +[Bug #3582](https://redmine.postgresql.org/issues/3582) - Ensure that JSON strings as comments should be added properly for all the objects. +[Bug #3605](https://redmine.postgresql.org/issues/3605) - Fix an issue where Deleting N number of rows makes first N number of rows disable. +[Bug #3933](https://redmine.postgresql.org/issues/3933) - Ignore exceptions in the logger. +[Bug #3938](https://redmine.postgresql.org/issues/3938) - Added support for Default Partition. +[Bug #4019](https://redmine.postgresql.org/issues/4019) - Update all Python and JavaScript dependencies. +[Bug #4037](https://redmine.postgresql.org/issues/4037) - Include comment SQL for inherited columns in reverse engineered table SQL. +[Bug #4050](https://redmine.postgresql.org/issues/4050) - Make the WHEN field a CodeMirror control on the Event Trigger dialogue. +[Bug #4052](https://redmine.postgresql.org/issues/4052) - Fix the online help button on the resource group dialogue. +[Bug #4053](https://redmine.postgresql.org/issues/4053) - Enable the online help button on the index dialogue. +[Bug #4062](https://redmine.postgresql.org/issues/4062) - Fix handling of numeric arrays in View/Edit Data. 
+[Bug #4063](https://redmine.postgresql.org/issues/4063) - Enlarge the grab handles for resizing dialogs etc. +[Bug #4069](https://redmine.postgresql.org/issues/4069) - Append the file suffix to filenames when needed in the File Create dialogue. +[Bug #4073](https://redmine.postgresql.org/issues/4073) - Change the CodeMirror active line background colour to $color-danger-lighter so it doesn't conflict with the selection colour. +[Bug #4081](https://redmine.postgresql.org/issues/4081) - Fix the RE-SQL syntax for roles with a VALID UNTIL clause. +[Bug #4082](https://redmine.postgresql.org/issues/4082) - Prevent an empty error message being shown when "downloading" a CREATE script using the CSV download. +[Bug #4084](https://redmine.postgresql.org/issues/4084) - Overhaul the layout saving code so it includes the Query Tool and Debugger, and stores the layout when change events are detected rather than (unreliably) on exit. +[Bug #4085](https://redmine.postgresql.org/issues/4085) - Display errors during CSV download from the Query Tool in the UI rather than putting them in the CSV file. +[Bug #4087](https://redmine.postgresql.org/issues/4087) - Fix an issue where 'GRANT UPDATE' sql should be displayed for default sequence privileges. +[Bug #4096](https://redmine.postgresql.org/issues/4096) - Ensure the toolbar buttons are properly reset following a CSV download in the Query Tool. +[Bug #4099](https://redmine.postgresql.org/issues/4099) - Fix SQL help for EPAS 10+, and refactor the URL generation code into a testable function. +[Bug #4100](https://redmine.postgresql.org/issues/4100) - Ensure sequences can be created with increment, start, minimum and maximum options set. +[Bug #4101](https://redmine.postgresql.org/issues/4101) - Ensure that confirmation dialog should be popped up before reload of query tool or debugger if it is opened in a new browser tab. 
+[Bug #4104](https://redmine.postgresql.org/issues/4104) - Ensure that record should be add/edited for root partition table with primary keys. +[Bug #4105](https://redmine.postgresql.org/issues/4105) - Fix an issue where JSON data would not be rendered in the Query Tool. +[Bug #4109](https://redmine.postgresql.org/issues/4109) - Ensure View/Materialized View node should be visible after updating any property. +[Bug #4110](https://redmine.postgresql.org/issues/4110) - Fix custom autovacuum configuration for Materialized Views. +[Bug #4121](https://redmine.postgresql.org/issues/4121) - Fixed alignment issue of columns in definition section of Index node. +[Bug #4131](https://redmine.postgresql.org/issues/4131) - Relabel the Save button on the datagrid text editor to avoid confusion with the actual Save button that updates the database. +[Bug #4134](https://redmine.postgresql.org/issues/4134) - Fixed 'Location cannot be empty' error when open Tablespace properties. +[Bug #4138](https://redmine.postgresql.org/issues/4138) - Fix an issue where the dropdown becomes misaligned/displaced. +[Bug #4142](https://redmine.postgresql.org/issues/4142) - Added recommended ESLinter checks. +[Bug #4143](https://redmine.postgresql.org/issues/4143) - Ensure that pgAdmin4 should work properly with psycopg2 v2.8 +[Bug #4154](https://redmine.postgresql.org/issues/4154) - Ensure the treeview shows all sequences except those used to implement IDENTITY columns (which can be edited as part of the column). Show all if Show System Objects is enabled. +[Bug #4160](https://redmine.postgresql.org/issues/4160) - Fixed 'Increment value cannot be empty' error for existing tables. +[Bug #4161](https://redmine.postgresql.org/issues/4161) - Ensure that parameters of procedures for EPAS server 10 and below should be set/reset properly. +[Bug #4163](https://redmine.postgresql.org/issues/4163) - Prevent duplicate columns being included in reverse engineered SQL for tables. 
+[Bug #4164](https://redmine.postgresql.org/issues/4164) - Fix file browser path issue which occurs when client is on Windows and server is on Mac/Linux. +[Bug #4182](https://redmine.postgresql.org/issues/4182) - Ensure sanity of the permissions on the storage and session directories and the config database. +[Bug #4218](https://redmine.postgresql.org/issues/4218) - Properly assign dropdownParent in Select2 controls. +[Bug #4219](https://redmine.postgresql.org/issues/4219) - Ensure popper.js is installed when needed. +[Bug #4246](https://redmine.postgresql.org/issues/4246) - Fixed console error when subnode control is used in panels. +[Bug #4194](https://redmine.postgresql.org/issues/4194) - Fix accessibility issue for menu navigation. +[Bug #4227](https://redmine.postgresql.org/issues/4227) - Fixed Tab key navigation for Maintenance dialog. +[Bug #4244](https://redmine.postgresql.org/issues/4244) - Fix Tab key issue for Toggle switch controls and button on the dialog footer in Safari browser. +[Bug #4245](https://redmine.postgresql.org/issues/4245) - Ensure that element should get highlighted when they get focus on using Tab key. +[Bug #4261](https://redmine.postgresql.org/issues/4261) - Stop using application/x-javascript as a mime type and use the RFC-compliant application/javascript instead. +[Bug #4262](https://redmine.postgresql.org/issues/4262) - Fixed error on displaying table properties of a table partitioned by list having a default partition. +[Bug #4263](https://redmine.postgresql.org/issues/4263) - Fix handling of JSON in the Query Tool with NULL elements. +[Bug #4269](https://redmine.postgresql.org/issues/4269) - Fix navigation of switch cells in grids. +[Bug #4276](https://redmine.postgresql.org/issues/4276) - Relax the permission check on the directory containing the config database, as it may fail in some environments such as OpenShift. 
+[Bug #4278](https://redmine.postgresql.org/issues/4278) - Prevent Backgrid Password cells from losing focus if the browser opens an autocomplete list. +[Bug #4308](https://redmine.postgresql.org/issues/4308) - Fix the issue of accessing the SQL for Views and Materialized Views. Regression of pluralisation of folder names. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/12_pem_release_notes_7_7_1.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/12_pem_release_notes_7_7_1.mdx index b0f27aa36c5..6082a0feb2a 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/12_pem_release_notes_7_7_1.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/12_pem_release_notes_7_7_1.mdx @@ -2,13 +2,11 @@ title: "PEM v7.7.1" --- - - Release date: 2019-04-10 This release contains bug fixes. ## Bug fixes -Bug \#PEM-2092 - Remove the obsolete packages while upgrading the edb-pem-server (v7.5 and ealier) using the RPM -Bug \#PEM-2089 - Zooming on a line chart does not working when line charts are linked. +Bug #PEM-2092 - Remove the obsolete packages while upgrading the edb-pem-server (v7.5 and ealier) using the RPM +Bug #PEM-2089 - Zooming on a line chart does not working when line charts are linked. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/13_pem_release_notes_7_7.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/13_pem_release_notes_7_7.mdx index 74399b791b6..6389b20f8e4 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/13_pem_release_notes_7_7.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/13_pem_release_notes_7_7.mdx @@ -2,111 +2,109 @@ title: "PEM v7.7" --- - - Release date: 2019-03-13 This release contains a new PEM UI update and some of the features and bug fixes. 
## Features -[Feature \#2233](https://redmine.postgresql.org/issues/2233) - Add a "scratch pad" to the Query Tool to hold text snippets whilst editing. -[Feature \#2418](https://redmine.postgresql.org/issues/2418) - Add Commit and Rollback buttons to the Query Tool. -[Feature \#3439](https://redmine.postgresql.org/issues/3439) - Allow X-FRAME-OPTIONS to be set for security. Default to SAMEORIGIN. -[Feature \#3559](https://redmine.postgresql.org/issues/3559) - Automatically expand child nodes as well as the selected node on the treeview if there is only one. -[Feature \#4034](https://redmine.postgresql.org/issues/4034) - Support double-click on Query Tool result grid column resize handles to auto-size to the content. +[Feature #2233](https://redmine.postgresql.org/issues/2233) - Add a "scratch pad" to the Query Tool to hold text snippets whilst editing. +[Feature #2418](https://redmine.postgresql.org/issues/2418) - Add Commit and Rollback buttons to the Query Tool. +[Feature #3439](https://redmine.postgresql.org/issues/3439) - Allow X-FRAME-OPTIONS to be set for security. Default to SAMEORIGIN. +[Feature #3559](https://redmine.postgresql.org/issues/3559) - Automatically expand child nodes as well as the selected node on the treeview if there is only one. +[Feature #4034](https://redmine.postgresql.org/issues/4034) - Support double-click on Query Tool result grid column resize handles to auto-size to the content. ## Bug fixes -Bug \#PEM-732 - Allow user to download explain plan from sql profiler \[Support Ticket \#484049\] -Bug \#PEM-1544 - Give proper privileges and explicit type cast to hstore extension \[Support Ticket \#820063\] -Bug \#PEM-1596 - PEM Agent registration creates cert files with additional .crt extension \[Support Ticket \#853579\] -Bug \#PEM-1684 - Handle proper exception during alert processing. 
\[Support Ticket \#830175\] -Bug \#PEM-1743 - Delete all the agent entry except for the lastest one sorted by login time \[Support Ticket \#829037\] -Bug \#PEM-1855 - Server info probe shows numeric overflow error if shared memory size is too high \[Support Ticket \#833535\] -Bug \#PEM-1943 - Honour the pause of the auto-refresh on the 'Alert Status' table whilst showing the alert details \[Support Ticket \#853200\] -Bug \#PEM-1964 - RPM libboost dependencies issues on IBM powerbox \[Support Ticket \#849462\] -Bug \#PEM-1976 - Fetch the session information for the same server when fetching the lock information \[Support Ticket \#853549\] -[Bug \#3051](https://redmine.postgresql.org/issues/3051) - Replace Bootstrap switch with Bootstrap4 toggle to improve the performance. -[Bug \#3096](https://redmine.postgresql.org/issues/3096) - Ensure size stats are prettified on the statistics tab when the UI language is not English. -[Bug \#3272](https://redmine.postgresql.org/issues/3272) - Replace the PyCrypto module with the cryptography module. -[Bug \#3352](https://redmine.postgresql.org/issues/3352) - Handle display of roles with expiration set to infinity correctly. -[Bug \#3418](https://redmine.postgresql.org/issues/3418) - Allow editing of values in columns with the oid datatype which are not an actual row OID. -[Bug \#3453](https://redmine.postgresql.org/issues/3453) - Fixed SQL for foreign table options. -[Bug \#3475](https://redmine.postgresql.org/issues/3475) - Fixed execution time to show Hours part for long running queries in Query Tool. -[Bug \#3505](https://redmine.postgresql.org/issues/3505) - Fix SQL generated for tables with inherited columns. -[Bug \#3544](https://redmine.postgresql.org/issues/3544) - Make the Query Tool tab titles more concise and useful. -[Bug \#3549](https://redmine.postgresql.org/issues/3549) - Display event trigger functions correctly on EPAS. 
-[Bug \#3575](https://redmine.postgresql.org/issues/3575) - Ensure the context menu works after a server is renamed. -[Bug \#3583](https://redmine.postgresql.org/issues/3583) - Update CodeMirror to 5.43.0 to resolve issues with auto-indent. -[Bug \#3587](https://redmine.postgresql.org/issues/3587) - Fix support for bigint's in JSONB data. -[Bug \#3600](https://redmine.postgresql.org/issues/3600) - Ensure JSON data isn't modified in-flight by psycopg2 when using View/Edit data. -[Bug \#3608](https://redmine.postgresql.org/issues/3608) - Messages tab of query tool should be clear on subsequent execution of table/view using View/Edit Data. -[Bug \#3609](https://redmine.postgresql.org/issues/3609) - Clear drop-down menu should be disabled for View/Edit Data. -[Bug \#3664](https://redmine.postgresql.org/issues/3664) - Fixed Statistics panel hang issue for 1000+ tables. -[Bug \#3673](https://redmine.postgresql.org/issues/3673) - Modify the Download as CSV option to use the same connection as the Query Tool its running in so temporary tables etc. can be used. -[Bug \#3679](https://redmine.postgresql.org/issues/3679) - Fix a webpack issue that could cause the Query Tool to fail to render. -[Bug \#3693](https://redmine.postgresql.org/issues/3693) - Proper error should be thrown when server group is created with existing name. -[Bug \#3695](https://redmine.postgresql.org/issues/3695) - Ensure long string should be wrap in alertify dialogs. -[Bug \#3697](https://redmine.postgresql.org/issues/3697) - Ensure that output of the query should be displayed even if Data Output window is detached from the Query Tool. -[Bug \#3702](https://redmine.postgresql.org/issues/3702) - Ensure we display the relation name (and not the OID) in the locks table wherever possible. -[Bug \#3740](https://redmine.postgresql.org/issues/3740) - Inline edbspl trigger functions should not be visible in Grant Wizard. 
-[Bug \#3774](https://redmine.postgresql.org/issues/3774) - Proper SQL should be generated when create function with return type as custom type argument. -[Bug \#3780](https://redmine.postgresql.org/issues/3780) - Ensure that null values handled properly in CSV download. -[Bug \#3800](https://redmine.postgresql.org/issues/3800) - Ensure that database restriction of server dialog should work with special characters. -[Bug \#3809](https://redmine.postgresql.org/issues/3809) - Ensure auto complete should works when first identifier in the FROM clause needs quoting. -[Bug \#3810](https://redmine.postgresql.org/issues/3810) - Ensure auto complete should works for columns from a schema-qualified table. -[Bug \#3811](https://redmine.postgresql.org/issues/3811) - Ensure that Backup/Restore button should work on single click. -[Bug \#3836](https://redmine.postgresql.org/issues/3836) - Fix ordering of VACUUM options which changed in PG11. -[Bug \#3837](https://redmine.postgresql.org/issues/3837) - Fixed SQL for when clause while creating Trigger. -[Bug \#3838](https://redmine.postgresql.org/issues/3838) - Proper SQL should be generated when creating/changing column with custom type argument. -[Bug \#3840](https://redmine.postgresql.org/issues/3840) - Ensure that file format combo box value should be retained when hidden files checkbox is toggled. -[Bug \#3842](https://redmine.postgresql.org/issues/3842) - Don't show system catalogs in the schemas property list unless show system objects is enabled. -[Bug \#3846](https://redmine.postgresql.org/issues/3846) - Proper SQL should be generated when create procedure with custom type arguments. -[Bug \#3849](https://redmine.postgresql.org/issues/3849) - Ensure that browser should warn before close or refresh. -[Bug \#3850](https://redmine.postgresql.org/issues/3850) - Fixed EXEC script for procedures. 
-[Bug \#3853](https://redmine.postgresql.org/issues/3853) - Proper SQL should be generated when create domain of type interval with precision. -[Bug \#3856](https://redmine.postgresql.org/issues/3856) - Fixed an issue while creating export job. -[Bug \#3858](https://redmine.postgresql.org/issues/3858) - Drop-down should be closed when click on any other toolbar button. -[Bug \#3861](https://redmine.postgresql.org/issues/3861) - Fix help for the backup/restore dialogues. -[Bug \#3862](https://redmine.postgresql.org/issues/3862) - Fixed keyboard navigation for dialog tabs. -[Bug \#3865](https://redmine.postgresql.org/issues/3865) - Increase frames splitter mouse hover area to make it easier to resize. -[Bug \#3866](https://redmine.postgresql.org/issues/3866) - Ensure that last row of table data should be visible and user will be able to add new row. -[Bug \#3871](https://redmine.postgresql.org/issues/3871) - Fixed alignment of tree arrow icons for Internet Explorer. -[Bug \#3872](https://redmine.postgresql.org/issues/3872) - Ensure object names in external process dialogues are properly escaped. -[Bug \#3873](https://redmine.postgresql.org/issues/3873) - Fix context sub-menu alignment on Safari. -[Bug \#3877](https://redmine.postgresql.org/issues/3877) - Make the browser more robust in the face of multibyte characters in SQL\_ASCII databases. -[Bug \#3891](https://redmine.postgresql.org/issues/3891) - Correct order of Save and Cancel button for json/jsonb editing. -[Bug \#3897](https://redmine.postgresql.org/issues/3897) - Data should be updated properly for FTS Configurations, FTS Dictionaries, FTS Parsers and FTS Templates. -[Bug \#3899](https://redmine.postgresql.org/issues/3899) - Fixed unable to drop multiple Rules and Foreign Tables from properties tab. -[Bug \#3903](https://redmine.postgresql.org/issues/3903) - Fixed Query Tool Initialization Error. 
-[Bug \#3906](https://redmine.postgresql.org/issues/3906) - Fix alignment of Close and Maximize button of Grant Wizard. -[Bug \#3908](https://redmine.postgresql.org/issues/3908) - Fixed keyboard navigation for Select2 and Privilege cell in Backgrid. -[Bug \#3911](https://redmine.postgresql.org/issues/3911) - Add full support and tests for all PG server side encodings. -[Bug \#3912](https://redmine.postgresql.org/issues/3912) - Fix editing of table data with a JSON primary key. -[Bug \#3916](https://redmine.postgresql.org/issues/3916) - Correct schema should be displayed in Materialized View dialog. -[Bug \#3927](https://redmine.postgresql.org/issues/3927) - Fixed debugger issue for procedure inside package for EPAS servers. -[Bug \#3929](https://redmine.postgresql.org/issues/3929) - Fix alignment of help messages in properties panels. -[Bug \#3932](https://redmine.postgresql.org/issues/3932) - Fix alignment of submenu for Internet Explorer. -[Bug \#3935](https://redmine.postgresql.org/issues/3935) - Ensure that grant wizard should list down functions for EPAS server running with no-redwood-compat mode. \[Support Ticket \#833292\] -[Bug \#3941](https://redmine.postgresql.org/issues/3941) - Dashboard graph optimization. -[Bug \#3946](https://redmine.postgresql.org/issues/3946) - Fix alignment of checkbox to drop multiple schedules of pgAgent job. -[Bug \#3948](https://redmine.postgresql.org/issues/3948) - Set the background colour for backform notes, and add an icon. -[Bug \#3954](https://redmine.postgresql.org/issues/3954) - Remove Python 2.6 code that's now obsolete. -[Bug \#3958](https://redmine.postgresql.org/issues/3958) - Don't exclude SELECT statements from transaction management in the Query Tool in case they call data-modifying functions. -[Bug \#3959](https://redmine.postgresql.org/issues/3959) - Optimise display of Dependencies and Dependents, and use on-demand loading of rows in batches of 100. 
-[Bug \#3961](https://redmine.postgresql.org/issues/3961) - Exclude HTTPExceptions from the all\_exception\_handler as they should be returned as-is. -[Bug \#3963](https://redmine.postgresql.org/issues/3963) - Fix alignment of import/export toggle switch. -[Bug \#3968](https://redmine.postgresql.org/issues/3968) - Update wcDocker to fix the issue where the Scratch Pad grows in size if the results panel is resized. -[Bug \#3970](https://redmine.postgresql.org/issues/3970) - Prevent an error when closing the Sort/Filter dialogue with an empty filter string. -[Bug \#3973](https://redmine.postgresql.org/issues/3973) - Use 'set\_config(...)' function to update the 'bytea\_output' settings instead of 'UPDATE' statement, which is not allowed in the the read-only instances. -[Bug \#3974](https://redmine.postgresql.org/issues/3974) - Fix alignment of Connection type toggle switch of pgagent. -[Bug \#3982](https://redmine.postgresql.org/issues/3982) - Add full support and tests for all PG server side encodings. -[Bug \#3992](https://redmine.postgresql.org/issues/3992) - Add full support and tests for all PG server side encodings. -[Bug \#3995](https://redmine.postgresql.org/issues/3995) - Avoid 'bogus varno' message from Postgres when viewing the SQL for a table with triggers. -[Bug \#3998](https://redmine.postgresql.org/issues/3998) - Custom-encode forward slashes in URL parameters as Apache HTTPD doesn't allow them in some cases. -[Bug \#4000](https://redmine.postgresql.org/issues/4000) - Update CodeMirror to 5.43.0 to resolve issues with tab indent with use spaces enabled. -[Bug \#4013](https://redmine.postgresql.org/issues/4013) - Ensure long queries don't cause errors when downloading CSV in the Query Tool. -[Bug \#4021](https://redmine.postgresql.org/issues/4021) - Disable the editor and execute functions whilst queries are executing. -[Bug \#4054](https://redmine.postgresql.org/issues/4054) - Handle resultsets with zero columns correctly in the Query Tool. 
-[Bug \#4071](https://redmine.postgresql.org/issues/4071) - Ensure that Firefox prompts for a filename/location when downloading query results as a CSV file. +Bug #PEM-732 - Allow user to download explain plan from sql profiler \[Support Ticket #484049] +Bug #PEM-1544 - Give proper privileges and explicit type cast to hstore extension \[Support Ticket #820063] +Bug #PEM-1596 - PEM Agent registration creates cert files with additional .crt extension \[Support Ticket #853579] +Bug #PEM-1684 - Handle proper exception during alert processing. \[Support Ticket #830175] +Bug #PEM-1743 - Delete all the agent entry except for the lastest one sorted by login time \[Support Ticket #829037] +Bug #PEM-1855 - Server info probe shows numeric overflow error if shared memory size is too high \[Support Ticket #833535] +Bug #PEM-1943 - Honour the pause of the auto-refresh on the 'Alert Status' table whilst showing the alert details \[Support Ticket #853200] +Bug #PEM-1964 - RPM libboost dependencies issues on IBM powerbox \[Support Ticket #849462] +Bug #PEM-1976 - Fetch the session information for the same server when fetching the lock information \[Support Ticket #853549] +[Bug #3051](https://redmine.postgresql.org/issues/3051) - Replace Bootstrap switch with Bootstrap4 toggle to improve the performance. +[Bug #3096](https://redmine.postgresql.org/issues/3096) - Ensure size stats are prettified on the statistics tab when the UI language is not English. +[Bug #3272](https://redmine.postgresql.org/issues/3272) - Replace the PyCrypto module with the cryptography module. +[Bug #3352](https://redmine.postgresql.org/issues/3352) - Handle display of roles with expiration set to infinity correctly. +[Bug #3418](https://redmine.postgresql.org/issues/3418) - Allow editing of values in columns with the oid datatype which are not an actual row OID. +[Bug #3453](https://redmine.postgresql.org/issues/3453) - Fixed SQL for foreign table options. 
+[Bug #3475](https://redmine.postgresql.org/issues/3475) - Fixed execution time to show Hours part for long running queries in Query Tool. +[Bug #3505](https://redmine.postgresql.org/issues/3505) - Fix SQL generated for tables with inherited columns. +[Bug #3544](https://redmine.postgresql.org/issues/3544) - Make the Query Tool tab titles more concise and useful. +[Bug #3549](https://redmine.postgresql.org/issues/3549) - Display event trigger functions correctly on EPAS. +[Bug #3575](https://redmine.postgresql.org/issues/3575) - Ensure the context menu works after a server is renamed. +[Bug #3583](https://redmine.postgresql.org/issues/3583) - Update CodeMirror to 5.43.0 to resolve issues with auto-indent. +[Bug #3587](https://redmine.postgresql.org/issues/3587) - Fix support for bigint's in JSONB data. +[Bug #3600](https://redmine.postgresql.org/issues/3600) - Ensure JSON data isn't modified in-flight by psycopg2 when using View/Edit data. +[Bug #3608](https://redmine.postgresql.org/issues/3608) - Messages tab of query tool should be clear on subsequent execution of table/view using View/Edit Data. +[Bug #3609](https://redmine.postgresql.org/issues/3609) - Clear drop-down menu should be disabled for View/Edit Data. +[Bug #3664](https://redmine.postgresql.org/issues/3664) - Fixed Statistics panel hang issue for 1000+ tables. +[Bug #3673](https://redmine.postgresql.org/issues/3673) - Modify the Download as CSV option to use the same connection as the Query Tool its running in so temporary tables etc. can be used. +[Bug #3679](https://redmine.postgresql.org/issues/3679) - Fix a webpack issue that could cause the Query Tool to fail to render. +[Bug #3693](https://redmine.postgresql.org/issues/3693) - Proper error should be thrown when server group is created with existing name. +[Bug #3695](https://redmine.postgresql.org/issues/3695) - Ensure long string should be wrap in alertify dialogs. 
+[Bug #3697](https://redmine.postgresql.org/issues/3697) - Ensure that output of the query should be displayed even if Data Output window is detached from the Query Tool. +[Bug #3702](https://redmine.postgresql.org/issues/3702) - Ensure we display the relation name (and not the OID) in the locks table wherever possible. +[Bug #3740](https://redmine.postgresql.org/issues/3740) - Inline edbspl trigger functions should not be visible in Grant Wizard. +[Bug #3774](https://redmine.postgresql.org/issues/3774) - Proper SQL should be generated when create function with return type as custom type argument. +[Bug #3780](https://redmine.postgresql.org/issues/3780) - Ensure that null values handled properly in CSV download. +[Bug #3800](https://redmine.postgresql.org/issues/3800) - Ensure that database restriction of server dialog should work with special characters. +[Bug #3809](https://redmine.postgresql.org/issues/3809) - Ensure auto complete should works when first identifier in the FROM clause needs quoting. +[Bug #3810](https://redmine.postgresql.org/issues/3810) - Ensure auto complete should works for columns from a schema-qualified table. +[Bug #3811](https://redmine.postgresql.org/issues/3811) - Ensure that Backup/Restore button should work on single click. +[Bug #3836](https://redmine.postgresql.org/issues/3836) - Fix ordering of VACUUM options which changed in PG11. +[Bug #3837](https://redmine.postgresql.org/issues/3837) - Fixed SQL for when clause while creating Trigger. +[Bug #3838](https://redmine.postgresql.org/issues/3838) - Proper SQL should be generated when creating/changing column with custom type argument. +[Bug #3840](https://redmine.postgresql.org/issues/3840) - Ensure that file format combo box value should be retained when hidden files checkbox is toggled. +[Bug #3842](https://redmine.postgresql.org/issues/3842) - Don't show system catalogs in the schemas property list unless show system objects is enabled. 
+[Bug #3846](https://redmine.postgresql.org/issues/3846) - Proper SQL should be generated when create procedure with custom type arguments. +[Bug #3849](https://redmine.postgresql.org/issues/3849) - Ensure that browser should warn before close or refresh. +[Bug #3850](https://redmine.postgresql.org/issues/3850) - Fixed EXEC script for procedures. +[Bug #3853](https://redmine.postgresql.org/issues/3853) - Proper SQL should be generated when create domain of type interval with precision. +[Bug #3856](https://redmine.postgresql.org/issues/3856) - Fixed an issue while creating export job. +[Bug #3858](https://redmine.postgresql.org/issues/3858) - Drop-down should be closed when click on any other toolbar button. +[Bug #3861](https://redmine.postgresql.org/issues/3861) - Fix help for the backup/restore dialogues. +[Bug #3862](https://redmine.postgresql.org/issues/3862) - Fixed keyboard navigation for dialog tabs. +[Bug #3865](https://redmine.postgresql.org/issues/3865) - Increase frames splitter mouse hover area to make it easier to resize. +[Bug #3866](https://redmine.postgresql.org/issues/3866) - Ensure that last row of table data should be visible and user will be able to add new row. +[Bug #3871](https://redmine.postgresql.org/issues/3871) - Fixed alignment of tree arrow icons for Internet Explorer. +[Bug #3872](https://redmine.postgresql.org/issues/3872) - Ensure object names in external process dialogues are properly escaped. +[Bug #3873](https://redmine.postgresql.org/issues/3873) - Fix context sub-menu alignment on Safari. +[Bug #3877](https://redmine.postgresql.org/issues/3877) - Make the browser more robust in the face of multibyte characters in SQL_ASCII databases. +[Bug #3891](https://redmine.postgresql.org/issues/3891) - Correct order of Save and Cancel button for json/jsonb editing. +[Bug #3897](https://redmine.postgresql.org/issues/3897) - Data should be updated properly for FTS Configurations, FTS Dictionaries, FTS Parsers and FTS Templates. 
+[Bug #3899](https://redmine.postgresql.org/issues/3899) - Fixed unable to drop multiple Rules and Foreign Tables from properties tab. +[Bug #3903](https://redmine.postgresql.org/issues/3903) - Fixed Query Tool Initialization Error. +[Bug #3906](https://redmine.postgresql.org/issues/3906) - Fix alignment of Close and Maximize button of Grant Wizard. +[Bug #3908](https://redmine.postgresql.org/issues/3908) - Fixed keyboard navigation for Select2 and Privilege cell in Backgrid. +[Bug #3911](https://redmine.postgresql.org/issues/3911) - Add full support and tests for all PG server side encodings. +[Bug #3912](https://redmine.postgresql.org/issues/3912) - Fix editing of table data with a JSON primary key. +[Bug #3916](https://redmine.postgresql.org/issues/3916) - Correct schema should be displayed in Materialized View dialog. +[Bug #3927](https://redmine.postgresql.org/issues/3927) - Fixed debugger issue for procedure inside package for EPAS servers. +[Bug #3929](https://redmine.postgresql.org/issues/3929) - Fix alignment of help messages in properties panels. +[Bug #3932](https://redmine.postgresql.org/issues/3932) - Fix alignment of submenu for Internet Explorer. +[Bug #3935](https://redmine.postgresql.org/issues/3935) - Ensure that grant wizard should list down functions for EPAS server running with no-redwood-compat mode. \[Support Ticket #833292] +[Bug #3941](https://redmine.postgresql.org/issues/3941) - Dashboard graph optimization. +[Bug #3946](https://redmine.postgresql.org/issues/3946) - Fix alignment of checkbox to drop multiple schedules of pgAgent job. +[Bug #3948](https://redmine.postgresql.org/issues/3948) - Set the background colour for backform notes, and add an icon. +[Bug #3954](https://redmine.postgresql.org/issues/3954) - Remove Python 2.6 code that's now obsolete. +[Bug #3958](https://redmine.postgresql.org/issues/3958) - Don't exclude SELECT statements from transaction management in the Query Tool in case they call data-modifying functions. 
+[Bug #3959](https://redmine.postgresql.org/issues/3959) - Optimise display of Dependencies and Dependents, and use on-demand loading of rows in batches of 100. +[Bug #3961](https://redmine.postgresql.org/issues/3961) - Exclude HTTPExceptions from the all_exception_handler as they should be returned as-is. +[Bug #3963](https://redmine.postgresql.org/issues/3963) - Fix alignment of import/export toggle switch. +[Bug #3968](https://redmine.postgresql.org/issues/3968) - Update wcDocker to fix the issue where the Scratch Pad grows in size if the results panel is resized. +[Bug #3970](https://redmine.postgresql.org/issues/3970) - Prevent an error when closing the Sort/Filter dialogue with an empty filter string. +[Bug #3973](https://redmine.postgresql.org/issues/3973) - Use 'set_config(...)' function to update the 'bytea_output' settings instead of 'UPDATE' statement, which is not allowed in the the read-only instances. +[Bug #3974](https://redmine.postgresql.org/issues/3974) - Fix alignment of Connection type toggle switch of pgagent. +[Bug #3982](https://redmine.postgresql.org/issues/3982) - Add full support and tests for all PG server side encodings. +[Bug #3992](https://redmine.postgresql.org/issues/3992) - Add full support and tests for all PG server side encodings. +[Bug #3995](https://redmine.postgresql.org/issues/3995) - Avoid 'bogus varno' message from Postgres when viewing the SQL for a table with triggers. +[Bug #3998](https://redmine.postgresql.org/issues/3998) - Custom-encode forward slashes in URL parameters as Apache HTTPD doesn't allow them in some cases. +[Bug #4000](https://redmine.postgresql.org/issues/4000) - Update CodeMirror to 5.43.0 to resolve issues with tab indent with use spaces enabled. +[Bug #4013](https://redmine.postgresql.org/issues/4013) - Ensure long queries don't cause errors when downloading CSV in the Query Tool. 
+[Bug #4021](https://redmine.postgresql.org/issues/4021) - Disable the editor and execute functions whilst queries are executing. +[Bug #4054](https://redmine.postgresql.org/issues/4054) - Handle resultsets with zero columns correctly in the Query Tool. +[Bug #4071](https://redmine.postgresql.org/issues/4071) - Ensure that Firefox prompts for a filename/location when downloading query results as a CSV file. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/14_pem_release_notes_7_6.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/14_pem_release_notes_7_6.mdx index 5b9413fc7ca..da583cd678e 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/14_pem_release_notes_7_6.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/14_pem_release_notes_7_6.mdx @@ -2,51 +2,49 @@ title: "PEM v7.6" --- - - Release date: 2019-01-09 This release contains a number of features and fixes reported since the release of PEM v7.6 ## Features -Feature \#PEM-545 - Allow to delete the server group \[Support Ticket \#737596\] -Feature \#PEM-945 - Allow to use the performance diagnostic (edb\_wait\_stat) plugin for EDB Postgres Advance Server -Feature \#PEM-1522 - Adding support in the sslutils to work with OpenSSL 1.1+ -[Feature \#1513](https://redmine.postgresql.org/issues/1513) - Add support for dropping multiple objects at once from the collection Properties panel. -[Feature \#3562](https://redmine.postgresql.org/issues/3562) - Migrate from Bootstrap 3 to Bootstrap 4. 
-Feature \#PEM-1528/[Feature \#3589](https://redmine.postgresql.org/issues/3589) - Allow query plans to be downloaded as an SVG file \[Support Ticket \#816621\] +Feature #PEM-545 - Allow to delete the server group \[Support Ticket #737596] +Feature #PEM-945 - Allow to use the performance diagnostic (edb_wait_stat) plugin for EDB Postgres Advance Server +Feature #PEM-1522 - Adding support in the sslutils to work with OpenSSL 1.1+ +[Feature #1513](https://redmine.postgresql.org/issues/1513) - Add support for dropping multiple objects at once from the collection Properties panel. +[Feature #3562](https://redmine.postgresql.org/issues/3562) - Migrate from Bootstrap 3 to Bootstrap 4. +Feature #PEM-1528/[Feature #3589](https://redmine.postgresql.org/issues/3589) - Allow query plans to be downloaded as an SVG file \[Support Ticket #816621] ## Bug fixes -Bug \#PEM-1368/[Bug \#3676](https://redmine.postgresql.org/issues/3676) - Fix CREATE Script functionality for EDB-Wrapped functions. \[Support Ticket \#796491\] -Bug \#PEM-1530 - Fix the server level charts \[Support Ticket \#815806\] -Bug \#PEM-1532 - Disable TRACE option from the configuration file for the apache server \[Support Ticket \#784345, \#818383\] -Bug \#PEM-1610 - EFM Status in streaming replication dashborad on PEM not shown \[Support Ticket \#826397\] -Bug \#PEM-1611 - PEM not able to monitor EFM with error: Failed to parse EFM json file \[Support Ticket \#826197\] -[Bug \#3016](https://redmine.postgresql.org/issues/3016) - Ensure previous notices are not removed from the Messages tab in the Query Tool if an error occurs during query execution. -[Bug \#3029](https://redmine.postgresql.org/issues/3029) - Allow the selection order to be preserved in the Select2 control to fix column ordering in data Import/Export. -[Bug \#3083](https://redmine.postgresql.org/issues/3083) - Increase the size of the resize handle of the edit grid text pop-out. 
-[Bug \#3232](https://redmine.postgresql.org/issues/3232) - Ensure that Utilities(Backup/Restore/Maintenence/Import-Export) should not be started if binary path is wrong and also added 'Stop Process' button to cancel the process. -[Bug \#3354](https://redmine.postgresql.org/issues/3354) - Fix handling of array types as inputs to the debugger. -[Bug \#3433](https://redmine.postgresql.org/issues/3433) - Fix an issue that could cause the Query Tool to fail to render. -[Bug \#3559](https://redmine.postgresql.org/issues/3559) - Further improvements to treeview restoration. -[Bug \#3619](https://redmine.postgresql.org/issues/3619) - Add titles to the code areas of the Query Tool and Debugger to ensure that panels can be re-docked within them. -[Bug \#3629](https://redmine.postgresql.org/issues/3629) - Allow use of 0 (integer) and empty strings as parameters in the debugger. -[Bug \#3638](https://redmine.postgresql.org/issues/3638) - Fix syntax error when creating new pgAgent schedules with a start date/time and exception. -[Bug \#3711](https://redmine.postgresql.org/issues/3711) - Fix an encoding issue in the query tool. -[Bug \#3722](https://redmine.postgresql.org/issues/3722) - Ensure that utility existence check should work for schema and other child objects while taking Backup/Restore. -[Bug \#3723](https://redmine.postgresql.org/issues/3723) - Properly report errors when debugging cannot be started. -[Bug \#3726](https://redmine.postgresql.org/issues/3726) - Include the WHERE clause on EXCLUDE constraints in RE-SQL. -[Bug \#3734](https://redmine.postgresql.org/issues/3734) - Prevent the debugger controls being pressed again before previous processing is complete. -[Bug \#3736](https://redmine.postgresql.org/issues/3736) - Fix toggle breakpoints buttons in the debugger. -[Bug \#3742](https://redmine.postgresql.org/issues/3742) - Fix changes to the NOT NULL and default value options in the Table Dialogue. 
-[Bug \#3746](https://redmine.postgresql.org/issues/3746) - Fix dropping of multiple functions/procedures at once.
-[Bug \#3753](https://redmine.postgresql.org/issues/3753) - Fix an issue when user define Cast from smallint->text is created.
-[Bug \#3757](https://redmine.postgresql.org/issues/3757) - Hide Radio buttons that should not be shown on the maintenance dialogue.
-[Bug \#3797](https://redmine.postgresql.org/issues/3797) - Prevent attempts to bulk-drop schema objects.
-[Bug \#3798](https://redmine.postgresql.org/issues/3798) - Ensure the browser toolbar buttons work in languages other than English.
-[Bug \#3805](https://redmine.postgresql.org/issues/3805) - Allow horizontal sizing of the edit grid text pop-out.
-[Bug \#3821](https://redmine.postgresql.org/issues/3821) - Ensure identifiers are properly displayed in the plan viewer.
-[Bug \#3823](https://redmine.postgresql.org/issues/3823) - Delete/Drop and drop cascade option under properties section is disabled for multiple objects.
-[Bug \#3824](https://redmine.postgresql.org/issues/3824) - Ensure the dashboard tabs are styles correctly.
+Bug #PEM-1368/[Bug #3676](https://redmine.postgresql.org/issues/3676) - Fix CREATE Script functionality for EDB-Wrapped functions. \[Support Ticket #796491]
+Bug #PEM-1530 - Fix the server level charts \[Support Ticket #815806]
+Bug #PEM-1532 - Disable TRACE option from the configuration file for the apache server \[Support Ticket #784345, #818383]
+Bug #PEM-1610 - EFM Status in streaming replication dashboard on PEM not shown \[Support Ticket #826397]
+Bug #PEM-1611 - PEM not able to monitor EFM with error: Failed to parse EFM json file \[Support Ticket #826197]
+[Bug #3016](https://redmine.postgresql.org/issues/3016) - Ensure previous notices are not removed from the Messages tab in the Query Tool if an error occurs during query execution. 
+[Bug #3029](https://redmine.postgresql.org/issues/3029) - Allow the selection order to be preserved in the Select2 control to fix column ordering in data Import/Export.
+[Bug #3083](https://redmine.postgresql.org/issues/3083) - Increase the size of the resize handle of the edit grid text pop-out.
+[Bug #3232](https://redmine.postgresql.org/issues/3232) - Ensure that Utilities(Backup/Restore/Maintenance/Import-Export) should not be started if binary path is wrong and also added 'Stop Process' button to cancel the process.
+[Bug #3354](https://redmine.postgresql.org/issues/3354) - Fix handling of array types as inputs to the debugger.
+[Bug #3433](https://redmine.postgresql.org/issues/3433) - Fix an issue that could cause the Query Tool to fail to render.
+[Bug #3559](https://redmine.postgresql.org/issues/3559) - Further improvements to treeview restoration.
+[Bug #3619](https://redmine.postgresql.org/issues/3619) - Add titles to the code areas of the Query Tool and Debugger to ensure that panels can be re-docked within them.
+[Bug #3629](https://redmine.postgresql.org/issues/3629) - Allow use of 0 (integer) and empty strings as parameters in the debugger.
+[Bug #3638](https://redmine.postgresql.org/issues/3638) - Fix syntax error when creating new pgAgent schedules with a start date/time and exception.
+[Bug #3711](https://redmine.postgresql.org/issues/3711) - Fix an encoding issue in the query tool.
+[Bug #3722](https://redmine.postgresql.org/issues/3722) - Ensure that utility existence check should work for schema and other child objects while taking Backup/Restore.
+[Bug #3723](https://redmine.postgresql.org/issues/3723) - Properly report errors when debugging cannot be started.
+[Bug #3726](https://redmine.postgresql.org/issues/3726) - Include the WHERE clause on EXCLUDE constraints in RE-SQL.
+[Bug #3734](https://redmine.postgresql.org/issues/3734) - Prevent the debugger controls being pressed again before previous processing is complete. 
+[Bug #3736](https://redmine.postgresql.org/issues/3736) - Fix toggle breakpoints buttons in the debugger.
+[Bug #3742](https://redmine.postgresql.org/issues/3742) - Fix changes to the NOT NULL and default value options in the Table Dialogue.
+[Bug #3746](https://redmine.postgresql.org/issues/3746) - Fix dropping of multiple functions/procedures at once.
+[Bug #3753](https://redmine.postgresql.org/issues/3753) - Fix an issue when user define Cast from smallint->text is created.
+[Bug #3757](https://redmine.postgresql.org/issues/3757) - Hide Radio buttons that should not be shown on the maintenance dialogue.
+[Bug #3797](https://redmine.postgresql.org/issues/3797) - Prevent attempts to bulk-drop schema objects.
+[Bug #3798](https://redmine.postgresql.org/issues/3798) - Ensure the browser toolbar buttons work in languages other than English.
+[Bug #3805](https://redmine.postgresql.org/issues/3805) - Allow horizontal sizing of the edit grid text pop-out.
+[Bug #3821](https://redmine.postgresql.org/issues/3821) - Ensure identifiers are properly displayed in the plan viewer.
+[Bug #3823](https://redmine.postgresql.org/issues/3823) - Delete/Drop and drop cascade option under properties section is disabled for multiple objects.
+[Bug #3824](https://redmine.postgresql.org/issues/3824) - Ensure the dashboard tabs are styled correctly. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/15_pem_release_notes_7_5.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/15_pem_release_notes_7_5.mdx index b5cd30b122d..40803b507dd 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/15_pem_release_notes_7_5.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/15_pem_release_notes_7_5.mdx @@ -2,44 +2,42 @@ title: "PEM v7.5" --- - - Release date: 2018-10-24 This release contains a number of features and fixes reported since the release of PEM v7.5 ## Features -Feature \#PEM-796 - As a user I should be able to exclude particular mount point from the disk-space probe -Feature \#PEM-1020 - As a user I would like to monitor the advanced server V11 -Feature \#PEM-1021 - As a user I would like to monitor the postgres V11 -Feature \#PEM-1022 - As a user I would like to use PG/EPAS v11 as backend server for the PEM -Feature \#PEM-1295 - Limit the number of connections to the PEM DB Server (using pgbouncer - connection pooler) \[Support Ticket \#784672\] -[Feature \#3564](https://redmine.postgresql.org/issues/3564) - Add shortcuts for View Data and the Query tool to the Browser header bar. -[Feature \#3514](https://redmine.postgresql.org/issues/3514) - Add optional data point markers and mouse-over tooltips to display values on graphs. -[Feature \#1407](https://redmine.postgresql.org/issues/1407) - Add a geometry viewer that can render PostGIS data on a blank canvas or various map sources. -[Feature \#1253](https://redmine.postgresql.org/issues/1253) - Save the treeview state periodically, and restore it automatically when reconnecting. 
+Feature #PEM-796 - As a user I should be able to exclude particular mount point from the disk-space probe +Feature #PEM-1020 - As a user I would like to monitor the advanced server V11 +Feature #PEM-1021 - As a user I would like to monitor the postgres V11 +Feature #PEM-1022 - As a user I would like to use PG/EPAS v11 as backend server for the PEM +Feature #PEM-1295 - Limit the number of connections to the PEM DB Server (using pgbouncer - connection pooler) \[Support Ticket #784672] +[Feature #3564](https://redmine.postgresql.org/issues/3564) - Add shortcuts for View Data and the Query tool to the Browser header bar. +[Feature #3514](https://redmine.postgresql.org/issues/3514) - Add optional data point markers and mouse-over tooltips to display values on graphs. +[Feature #1407](https://redmine.postgresql.org/issues/1407) - Add a geometry viewer that can render PostGIS data on a blank canvas or various map sources. +[Feature #1253](https://redmine.postgresql.org/issues/1253) - Save the treeview state periodically, and restore it automatically when reconnecting. ## Bug fixes -Bug \#PEM-724 - Can't debug function and procedure in package(EnterpriseDB) use non-superuser role \[Support Ticket \#569062\] -Bug \#PEM-918/[\#3596](https://redmine.postgresql.org/issues/3596) - Fix support for the CLOB datatype in EPAS \[Support Ticket \#761853\] -Bug \#PEM-1004 - Allow to enter floating values for threshold fields in Alerts configuration window \[Support Ticket \#775364\] -Bug \#PEM-1330 - User can not save password when connecting to server \[Support Ticket \#792298\] -Bug \#PEM-1350 - Allow to install on PostgreSQL/EDB Postgres Advanced Server 9.4 \[Support Ticket \#796149\] -Bug \#PEM-1431 - Release the connections for the connected servers on logout \[Support Ticket \#807009\] -[Bug \#3191](https://redmine.postgresql.org/issues/3191) - Fixed debugger execution issues. 
-[Bug \#3420](https://redmine.postgresql.org/issues/3420) - Merge pgcli code with version 1.10.3, which is used for auto complete feature. -[Bug \#3525](https://redmine.postgresql.org/issues/3525) - Ensure that refresh button on dashboard should refresh the table. -[Bug \#3551](https://redmine.postgresql.org/issues/3551) - Fix handling of backslashes in the edit grid. -[Bug \#3554](https://redmine.postgresql.org/issues/3554) - Fix auto scrolling issue in debugger on step in and step out. \[Support Ticket \#779956\] -[Bug \#3576](https://redmine.postgresql.org/issues/3576) - Ensure queries are no longer executed when dashboards are closed. -[Bug \#3604](https://redmine.postgresql.org/issues/3604) - Correct the documentation of View/Edit data. -[Bug \#3607](https://redmine.postgresql.org/issues/3607) - Fix logic around validation and highlighting of Sort/Filter in the Query Tool. -[Bug \#3630](https://redmine.postgresql.org/issues/3630) - Ensure auto-complete works for objects in schemas other than public and pg\_catalog. -[Bug \#3657](https://redmine.postgresql.org/issues/3657) - Ensure changes to Query Tool settings from the Preferences dialogue are applied before executing queries. -[Bug \#3658](https://redmine.postgresql.org/issues/3658) - Swap the Schema and Schemas icons and Catalog and Catalogs icons that had been used the wrong way around. -[Bug \#3660](https://redmine.postgresql.org/issues/3660) - Rename the 'SQL Editor' section of the Preferences to 'Query Tool' as it applies to the whole tool, not just the editor. -[Bug \#3674](https://redmine.postgresql.org/issues/3674) - Cleanup session files periodically. -[Bug \#3700](https://redmine.postgresql.org/issues/3700) - Fix connection garbage collector. -[Bug \#3703](https://redmine.postgresql.org/issues/3703) - Purge connections from the cache on logout. 
\[Support Ticket \#807009\] +Bug #PEM-724 - Can't debug function and procedure in package(EnterpriseDB) use non-superuser role \[Support Ticket #569062] +Bug #PEM-918/[#3596](https://redmine.postgresql.org/issues/3596) - Fix support for the CLOB datatype in EPAS \[Support Ticket #761853] +Bug #PEM-1004 - Allow to enter floating values for threshold fields in Alerts configuration window \[Support Ticket #775364] +Bug #PEM-1330 - User can not save password when connecting to server \[Support Ticket #792298] +Bug #PEM-1350 - Allow to install on PostgreSQL/EDB Postgres Advanced Server 9.4 \[Support Ticket #796149] +Bug #PEM-1431 - Release the connections for the connected servers on logout \[Support Ticket #807009] +[Bug #3191](https://redmine.postgresql.org/issues/3191) - Fixed debugger execution issues. +[Bug #3420](https://redmine.postgresql.org/issues/3420) - Merge pgcli code with version 1.10.3, which is used for auto complete feature. +[Bug #3525](https://redmine.postgresql.org/issues/3525) - Ensure that refresh button on dashboard should refresh the table. +[Bug #3551](https://redmine.postgresql.org/issues/3551) - Fix handling of backslashes in the edit grid. +[Bug #3554](https://redmine.postgresql.org/issues/3554) - Fix auto scrolling issue in debugger on step in and step out. \[Support Ticket #779956] +[Bug #3576](https://redmine.postgresql.org/issues/3576) - Ensure queries are no longer executed when dashboards are closed. +[Bug #3604](https://redmine.postgresql.org/issues/3604) - Correct the documentation of View/Edit data. +[Bug #3607](https://redmine.postgresql.org/issues/3607) - Fix logic around validation and highlighting of Sort/Filter in the Query Tool. +[Bug #3630](https://redmine.postgresql.org/issues/3630) - Ensure auto-complete works for objects in schemas other than public and pg_catalog. 
+[Bug #3657](https://redmine.postgresql.org/issues/3657) - Ensure changes to Query Tool settings from the Preferences dialogue are applied before executing queries. +[Bug #3658](https://redmine.postgresql.org/issues/3658) - Swap the Schema and Schemas icons and Catalog and Catalogs icons that had been used the wrong way around. +[Bug #3660](https://redmine.postgresql.org/issues/3660) - Rename the 'SQL Editor' section of the Preferences to 'Query Tool' as it applies to the whole tool, not just the editor. +[Bug #3674](https://redmine.postgresql.org/issues/3674) - Cleanup session files periodically. +[Bug #3700](https://redmine.postgresql.org/issues/3700) - Fix connection garbage collector. +[Bug #3703](https://redmine.postgresql.org/issues/3703) - Purge connections from the cache on logout. \[Support Ticket #807009] diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/index.mdx index 8f136775bc5..ee13a98d865 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/12_release_notes/index.mdx @@ -2,12 +2,10 @@ title: "Release Notes" --- - - EDB Postgres Enterprise Manager release notes provide the information on the features and improvements in each release. This page includes release notes for major release and minor (bugfix) releases. Select your version from the list below to see the release notes for it.
-pem\_release\_notes\_8\_0\_1 pem\_release\_notes\_8\_0 pem\_release\_notes\_7\_16 pem\_release\_notes\_7\_15 pem\_release\_notes\_7\_14 pem\_release\_notes\_7\_13 pem\_release\_notes\_7\_12 pem\_release\_notes\_7\_11 pem\_release\_notes\_7\_10 pem\_release\_notes\_7\_9 pem\_release\_notes\_7\_8 pem\_release\_notes\_7\_7\_1 pem\_release\_notes\_7\_7 pem\_release\_notes\_7\_6 pem\_release\_notes\_7\_5 +pem_release_notes_8_0_1 pem_release_notes_8_0 pem_release_notes_7_16 pem_release_notes_7_15 pem_release_notes_7_14 pem_release_notes_7_13 pem_release_notes_7_12 pem_release_notes_7_11 pem_release_notes_7_10 pem_release_notes_7_9 pem_release_notes_7_8 pem_release_notes_7_7_1 pem_release_notes_7_7 pem_release_notes_7_6 pem_release_notes_7_5
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/index.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/index.mdx index c95c7569a62..beed5cde740 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/index.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/index.mdx @@ -2,8 +2,6 @@ title: "Postgres Enterprise Manager" --- - - Welcome to Postgres Enterprise Manager (PEM). Postgres Enterprise Manager (PEM) consists of components that provide the management and analytical functionality for your EDB Postgres Advanced Server or PostgreSQL database. PEM is based on the Open Source pgAdmin IV project. pgAdmin is the leading Open Source management tool for Postgres, the world's most advanced Open Source database. pgAdmin IV is a comprehensive [database](http://www.postgresql.org) design and management system. pgAdmin 4 is designed to meet the needs of both novice and experienced Postgres users alike, providing a powerful graphical interface that simplifies the creation, maintenance and use of database objects. @@ -12,7 +10,7 @@ Contents:
-toc\_pem\_getting\_started toc\_pem\_agent toc\_pem\_client toc\_pem\_features toc\_pem\_management\_basics toc\_pem\_bart\_management toc\_pem\_sql\_profiler toc\_pem\_developer\_tools toc\_pem\_configure\_pgbouncer pgagent appendices release\_notes +toc_pem_getting_started toc_pem_agent toc_pem_client toc_pem_features toc_pem_management_basics toc_pem_bart_management toc_pem_sql_profiler toc_pem_developer_tools toc_pem_configure_pgbouncer pgagent appendices release_notes
diff --git a/product_docs/docs/pem/8.0.1/pem_upgrade/01_upgrading_pem_installation/04_upgrading_pem_installation_linux_graphical.mdx b/product_docs/docs/pem/8.0.1/pem_upgrade/01_upgrading_pem_installation/04_upgrading_pem_installation_linux_graphical.mdx index 3e0f08fb920..914f3466b38 100644 --- a/product_docs/docs/pem/8.0.1/pem_upgrade/01_upgrading_pem_installation/04_upgrading_pem_installation_linux_graphical.mdx +++ b/product_docs/docs/pem/8.0.1/pem_upgrade/01_upgrading_pem_installation/04_upgrading_pem_installation_linux_graphical.mdx @@ -56,10 +56,12 @@ The default installation location for the PEM Agent when installed by the graphi !!! Note You may need to enable the `[extras]` repository definition in the `CentOS-Base.repo` file (located in `/etc/yum.repos.d`). - If you are a Red Hat Network user, + ``` + If you are a Red Hat Network user, - - You must also enable the `rhel--server-optional-rpms` repository to use EPEL packages, where *x* specifies the version of RHEL on the host. You can make the repository accessible by enabling the `RHEL optional subchannel` for `RHN-Classic`. If you have a certificate-based subscription, then you must also enable `rhel--server-eus-optional-rpms` repository to use EPEL packages or please see the `Red Hat Subscription Management Guide` for the required repository. - - You must also enable the `rhel--server-extras-rpms` repository, where `x` specifies the version of the RHEL on the host. + - You must also enable the `rhel--server-optional-rpms` repository to use EPEL packages, where *x* specifies the version of RHEL on the host. You can make the repository accessible by enabling the `RHEL optional subchannel` for `RHN-Classic`. If you have a certificate-based subscription, then you must also enable `rhel--server-eus-optional-rpms` repository to use EPEL packages or please see the `Red Hat Subscription Management Guide` for the required repository. 
+ - You must also enable the `rhel--server-extras-rpms` repository, where `x` specifies the version of the RHEL on the host. + ``` 3. Install and configure the `edb.repo` file: diff --git a/product_docs/docs/pem/8.0.1/pem_upgrade/02_upgrading_backend_database.mdx b/product_docs/docs/pem/8.0.1/pem_upgrade/02_upgrading_backend_database.mdx index 78c086cfd2d..a9f5df3c482 100644 --- a/product_docs/docs/pem/8.0.1/pem_upgrade/02_upgrading_backend_database.mdx +++ b/product_docs/docs/pem/8.0.1/pem_upgrade/02_upgrading_backend_database.mdx @@ -7,86 +7,86 @@ If you are updating both PEM components and the PEM backend database, you should !!! Note From PEM 8.0 onwards, the PostgreSQL or EPAS version 11 or higher are only supported as backend database server. Hence if your backend database server is lower than version 11 then first you need to upgrade your backend database server and then upgrade the PEM components. - The update process described in this section uses the `pg_upgrade` utility to migrate from one version of the backend server to a more recent version. `pg_upgrade` facilitates migration between any supported version of Postgres, and any subsequent release of Postgres that is supported on the same platform. !!! 
Note If the source PEM Server is lower than the 7.16 version, then you need to replace the following functions before you run `pg_upgrade`: - - The `abstime`, `reltime`, and `tinterval` datatypes are depreacated from Postgres version 12 or later, hence to replace those dataypes with `timestamptz` data type use below command: - - ```text - DO - $$ - DECLARE - rec record; - cnt integer; - BEGIN - -- Check for the deprecated type in our user info probe - SELECT count(*) INTO cnt - FROM pem.probe_column - WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; - IF cnt = 0 THEN - RETURN; - END IF; - ALTER TABLE pemdata.user_info - ALTER COLUMN valuntil SET DATA TYPE timestamptz; - ALTER TABLE pemhistory.user_info - ALTER COLUMN valuntil SET DATA TYPE timestamptz; - -- Now update the pem.probe_column itself - UPDATE pem.probe_column - SET sql_data_type = ‘timestamptz’ - WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; - END; - $$ LANGUAGE ‘plpgsql’; - ``` - - - Replace the below function to avoid any alert errors: - - ```text - CREATE OR REPLACE FUNCTION pem.check_alert_params_array_size( - template_id pem.alert_template.id%type, params text[] - ) - RETURNS bool AS $FUNC$ - DECLARE - res bool := TRUE; - BEGIN - /* - * During restoring the pem database, it does not maintain the order while - * inserting data in the table, and uses the sort table based on the - * names. - * Hence - we need to check the foreign key constraint is present before - * validating these values. - */ - IF EXISTS( - SELECT 1 FROM information_schema.table_constraints - WHERE constraint_name='alert_template_id_fkey' AND - table_name='alert' AND table_schema='pem' - ) THEN - /* - * Need to use the IS TRUE construct outside the main query, because - * otherwise if there's no template by that ID then the query would return - * 0 rows and the result of the function would be undefined and CHECK - * constraint would succeed. 
- * Probably this is being over-cautious, because pem.alert.template_id - * references pem.alert_template.id. But the SQL standard (probably) does - * not define the order in which the CHECK or the FOREIGN KEY constraints - * should be validated; in case CHECK is validated first, we want it to - * fail. - */ - EXECUTE $SQL$ - SELECT ( - SELECT pem.check_array_size_equal(t.param_names, $2) - FROM pem.alert_template AS t - WHERE id = $1 - ) IS TRUE - $SQL$ INTO res USING template_id, params; +```` +- The `abstime`, `reltime`, and `tinterval` datatypes are depreacated from Postgres version 12 or later, hence to replace those dataypes with `timestamptz` data type use below command: + +```text +DO +$$ +DECLARE + rec record; + cnt integer; +BEGIN + -- Check for the deprecated type in our user info probe + SELECT count(*) INTO cnt + FROM pem.probe_column + WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; + IF cnt = 0 THEN + RETURN; END IF; - RETURN res; - END - $FUNC$ LANGUAGE 'plpgsql'; - ``` - + ALTER TABLE pemdata.user_info + ALTER COLUMN valuntil SET DATA TYPE timestamptz; + ALTER TABLE pemhistory.user_info + ALTER COLUMN valuntil SET DATA TYPE timestamptz; + -- Now update the pem.probe_column itself + UPDATE pem.probe_column + SET sql_data_type = ‘timestamptz’ + WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; +END; +$$ LANGUAGE ‘plpgsql’; +``` + +- Replace the below function to avoid any alert errors: + +```text +CREATE OR REPLACE FUNCTION pem.check_alert_params_array_size( +template_id pem.alert_template.id%type, params text[] +) +RETURNS bool AS $FUNC$ +DECLARE + res bool := TRUE; +BEGIN + /* + * During restoring the pem database, it does not maintain the order while + * inserting data in the table, and uses the sort table based on the + * names. + * Hence - we need to check the foreign key constraint is present before + * validating these values. 
+ */ + IF EXISTS( + SELECT 1 FROM information_schema.table_constraints + WHERE constraint_name='alert_template_id_fkey' AND + table_name='alert' AND table_schema='pem' + ) THEN + /* + * Need to use the IS TRUE construct outside the main query, because + * otherwise if there's no template by that ID then the query would return + * 0 rows and the result of the function would be undefined and CHECK + * constraint would succeed. + * Probably this is being over-cautious, because pem.alert.template_id + * references pem.alert_template.id. But the SQL standard (probably) does + * not define the order in which the CHECK or the FOREIGN KEY constraints + * should be validated; in case CHECK is validated first, we want it to + * fail. + */ + EXECUTE $SQL$ + SELECT ( + SELECT pem.check_array_size_equal(t.param_names, $2) + FROM pem.alert_template AS t + WHERE id = $1 + ) IS TRUE + $SQL$ INTO res USING template_id, params; + END IF; + RETURN res; +END +$FUNC$ LANGUAGE 'plpgsql'; +``` +```` `pg_upgrade` supports a transfer of data between servers of the same type. For example, you can use `pg_upgrade` to move data from a PostgreSQL 9.6 backend database to a PostgreSQL 11 backend database, but not to an Advanced Server 11 backend database. If you wish to migrate to a different type of backend database (i.e from a PostgreSQL server to Advanced Server), see [Moving the Postgres Enterprise Manager Server](03_moving_pem_server). diff --git a/product_docs/docs/pem/8.0.1/pem_upgrade/03_moving_pem_server.mdx b/product_docs/docs/pem/8.0.1/pem_upgrade/03_moving_pem_server.mdx index 83142303e68..e3a7af41cfd 100644 --- a/product_docs/docs/pem/8.0.1/pem_upgrade/03_moving_pem_server.mdx +++ b/product_docs/docs/pem/8.0.1/pem_upgrade/03_moving_pem_server.mdx @@ -62,100 +62,102 @@ Before starting the server migration, you should ensure that the firewalls betwe 4. 
Generate a Backup Script of the Source PEM Server - You can use the `pg_dump` utility to generate a script that contains the commands required to recreate the `pem` database on the target host. By default, `pg_dump` is installed in the `bin` directory under your Postgres installation. To invoke `pg_dump`, open a command line, navigate to the `bin` directory, and enter: + You can use the `pg_dump` utility to generate a script that contains the commands required to recreate the `pem` database on the target host. By default, `pg_dump` is installed in the `bin` directory under your Postgres installation. To invoke `pg_dump`, open a command line, navigate to the `bin` directory, and enter: ```text pg_dump -U > ``` - Where: + Where: - `` specifies the name of the database superuser for the PEM backend database. - `` specifies the name of the PEM backend database. - `` specifies the name of the script generated by pg_dump. - When prompted, provide the password associated with the user specified. - - The command shown instructs `pg_dump` to generate a script that (when executed) will re-create the `pem` database. The script will be named `backup.sql`, and will be created in the `tmp` directory. `pg_dump` is connecting to the server using the credentials of the user, `postgres`. - - Note that invoking the `pg_dump` utility will not interrupt current database users. - - !!! 
Note - If the source PEM Server is lower than the 7.16 version, then you need to replace the following functions before you run ``pg_dump`` to take backup: - - - The ``abstime``, ``reltime``, and ``tinterval`` datatypes are depreacated from Postgres version 12 or later, hence to replace those dataypes with ``timestamptz`` data type use below command: - - ```text - DO - $$ - DECLARE - rec record; - cnt integer; - BEGIN - -- Check for the deprecated type in our user info probe - SELECT count(*) INTO cnt - FROM pem.probe_column - WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; - IF cnt = 0 THEN - RETURN; - END IF; - ALTER TABLE pemdata.user_info - ALTER COLUMN valuntil SET DATA TYPE timestamptz; - ALTER TABLE pemhistory.user_info - ALTER COLUMN valuntil SET DATA TYPE timestamptz; - -- Now update the pem.probe_column itself - UPDATE pem.probe_column - SET sql_data_type = ‘timestamptz’ - WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; - END; - $$ LANGUAGE ‘plpgsql’; - ``` - - - Replace the below function to avoid any alert errors: - - ```text - CREATE OR REPLACE FUNCTION pem.check_alert_params_array_size( - template_id pem.alert_template.id%type, params text[] - ) - RETURNS bool AS $FUNC$ - DECLARE - res bool := TRUE; - BEGIN - /* - * During restoring the pem database, it does not maintain the order while - * inserting data in the table, and uses the sort table based on the - * names. - * Hence - we need to check the foreign key constraint is present before - * validating these values. - */ - IF EXISTS( - SELECT 1 FROM information_schema.table_constraints - WHERE constraint_name='alert_template_id_fkey' AND - table_name='alert' AND table_schema='pem' - ) THEN - /* - * Need to use the IS TRUE construct outside the main query, because - * otherwise if there's no template by that ID then the query would return - * 0 rows and the result of the function would be undefined and CHECK - * constraint would succeed. 
- * Probably this is being over-cautious, because pem.alert.template_id - * references pem.alert_template.id. But the SQL standard (probably) does - * not define the order in which the CHECK or the FOREIGN KEY constraints - * should be validated; in case CHECK is validated first, we want it to - * fail. - */ - EXECUTE $SQL$ - SELECT ( - SELECT pem.check_array_size_equal(t.param_names, $2) - FROM pem.alert_template AS t - WHERE id = $1 - ) IS TRUE - $SQL$ INTO res USING template_id, params; - END IF; - RETURN res; - END - $FUNC$ LANGUAGE 'plpgsql'; - ``` + When prompted, provide the password associated with the user specified. + + The command shown instructs `pg_dump` to generate a script that (when executed) will re-create the `pem` database. The script will be named `backup.sql`, and will be created in the `tmp` directory. `pg_dump` is connecting to the server using the credentials of the user, `postgres`. + + Note that invoking the `pg_dump` utility will not interrupt current database users. + + !!! 
Note + If the source PEM Server is lower than the 7.16 version, then you need to replace the following functions before you run `pg_dump` to take backup: + + ```` + - The ``abstime``, ``reltime``, and ``tinterval`` datatypes are depreacated from Postgres version 12 or later, hence to replace those dataypes with ``timestamptz`` data type use below command: + + ```text + DO + $$ + DECLARE + rec record; + cnt integer; + BEGIN + -- Check for the deprecated type in our user info probe + SELECT count(*) INTO cnt + FROM pem.probe_column + WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; + IF cnt = 0 THEN + RETURN; + END IF; + ALTER TABLE pemdata.user_info + ALTER COLUMN valuntil SET DATA TYPE timestamptz; + ALTER TABLE pemhistory.user_info + ALTER COLUMN valuntil SET DATA TYPE timestamptz; + -- Now update the pem.probe_column itself + UPDATE pem.probe_column + SET sql_data_type = ‘timestamptz’ + WHERE sql_data_type = ‘abstime’ AND internal_name = ‘valuntil’; + END; + $$ LANGUAGE ‘plpgsql’; + ``` + + - Replace the below function to avoid any alert errors: + + ```text + CREATE OR REPLACE FUNCTION pem.check_alert_params_array_size( + template_id pem.alert_template.id%type, params text[] + ) + RETURNS bool AS $FUNC$ + DECLARE + res bool := TRUE; + BEGIN + /* + * During restoring the pem database, it does not maintain the order while + * inserting data in the table, and uses the sort table based on the + * names. + * Hence - we need to check the foreign key constraint is present before + * validating these values. + */ + IF EXISTS( + SELECT 1 FROM information_schema.table_constraints + WHERE constraint_name='alert_template_id_fkey' AND + table_name='alert' AND table_schema='pem' + ) THEN + /* + * Need to use the IS TRUE construct outside the main query, because + * otherwise if there's no template by that ID then the query would return + * 0 rows and the result of the function would be undefined and CHECK + * constraint would succeed. 
+ * Probably this is being over-cautious, because pem.alert.template_id + * references pem.alert_template.id. But the SQL standard (probably) does + * not define the order in which the CHECK or the FOREIGN KEY constraints + * should be validated; in case CHECK is validated first, we want it to + * fail. + */ + EXECUTE $SQL$ + SELECT ( + SELECT pem.check_array_size_equal(t.param_names, $2) + FROM pem.alert_template AS t + WHERE id = $1 + ) IS TRUE + $SQL$ INTO res USING template_id, params; + END IF; + RETURN res; + END + $FUNC$ LANGUAGE 'plpgsql'; + ``` + ```` 5. Move the Backup to the Target Host From 9274a59882fc3e3fc55df4dcd9cf3e421a9815d4 Mon Sep 17 00:00:00 2001 From: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Date: Wed, 3 Mar 2021 18:51:56 +0000 Subject: [PATCH 03/28] can't move to stringify 9 until Remark 13 Former-commit-id: f223c1e26c8c257a69c4513373d9ff996cbdee8b --- package.json | 2 +- yarn.lock | 23 ++--------------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/package.json b/package.json index b24a13be125..651af77e6c9 100644 --- a/package.json +++ b/package.json @@ -86,7 +86,7 @@ "rehype-stringify": "^8.0.0", "remark-frontmatter": "^2.0.0", "remark-mdx": "^1.6.22", - "remark-stringify": "^9.0.1", + "remark-stringify": "^8.1.1", "to-vfile": "^6.1.0", "typescript": "^4.1.3" }, diff --git a/yarn.lock b/yarn.lock index 11a5e6c3d77..6486cab0c74 100644 --- a/yarn.lock +++ b/yarn.lock @@ -9856,7 +9856,7 @@ loglevel@^1.6.8: resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.7.1.tgz#005fde2f5e6e47068f935ff28573e125ef72f197" integrity sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw== -longest-streak@^2.0.0, longest-streak@^2.0.1: +longest-streak@^2.0.1: version "2.0.4" resolved "https://registry.yarnpkg.com/longest-streak/-/longest-streak-2.0.4.tgz#b8599957da5b5dab64dee3fe316fa774597d90e4" integrity 
sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg== @@ -10122,18 +10122,6 @@ mdast-util-to-hast@^3.0.4: unist-util-visit "^1.1.0" xtend "^4.0.1" -mdast-util-to-markdown@^0.6.0: - version "0.6.2" - resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-0.6.2.tgz#8fe6f42a2683c43c5609dfb40407c095409c85b4" - integrity sha512-iRczns6WMvu0hUw02LXsPDJshBIwtUPbvHBWo19IQeU0YqmzlA8Pd30U8V7uiI0VPkxzS7A/NXBXH6u+HS87Zg== - dependencies: - "@types/unist" "^2.0.0" - longest-streak "^2.0.0" - mdast-util-to-string "^2.0.0" - parse-entities "^2.0.0" - repeat-string "^1.0.0" - zwitch "^1.0.0" - mdast-util-to-nlcst@^3.2.0: version "3.2.3" resolved "https://registry.yarnpkg.com/mdast-util-to-nlcst/-/mdast-util-to-nlcst-3.2.3.tgz#dcd0f51b59515b11a0700aeb40f168ed7ba9ed3d" @@ -13146,7 +13134,7 @@ remark-stringify@6.0.4, remark-stringify@^6.0.0: unherit "^1.0.4" xtend "^4.0.1" -remark-stringify@^8.1.0: +remark-stringify@^8.1.0, remark-stringify@^8.1.1: version "8.1.1" resolved "https://registry.yarnpkg.com/remark-stringify/-/remark-stringify-8.1.1.tgz#e2a9dc7a7bf44e46a155ec78996db896780d8ce5" integrity sha512-q4EyPZT3PcA3Eq7vPpT6bIdokXzFGp9i85igjmhRyXWmPs0Y6/d2FYwUNotKAWyLch7g0ASZJn/KHHcHZQ163A== @@ -13166,13 +13154,6 @@ remark-stringify@^8.1.0: unherit "^1.0.4" xtend "^4.0.1" -remark-stringify@^9.0.1: - version "9.0.1" - resolved "https://registry.yarnpkg.com/remark-stringify/-/remark-stringify-9.0.1.tgz#576d06e910548b0a7191a71f27b33f1218862894" - integrity sha512-mWmNg3ZtESvZS8fv5PTvaPckdL4iNlCHTt8/e/8oN08nArHRHjNZMKzA/YW3+p7/lYqIw4nx1XsjCBo/AxNChg== - dependencies: - mdast-util-to-markdown "^0.6.0" - remark@^10.0.1: version "10.0.1" resolved "https://registry.yarnpkg.com/remark/-/remark-10.0.1.tgz#3058076dc41781bf505d8978c291485fe47667df" From 0a2063fbc5367f7d31bbc7d5197677e662ddf2cf Mon Sep 17 00:00:00 2001 From: George Song Date: Wed, 3 Mar 2021 10:56:11 -0800 Subject: [PATCH 04/28] feat: replicate 
cloud-native-postgresql v1.1.0 docs Former-commit-id: aafb9fa959c8d1bece3c1a6b85cbaa765c346961 --- temp_kubernetes/original/.gitignore | 2 + temp_kubernetes/original/README.md | 22 + .../original/graffle/apps-in-k8s.graffle | Bin .../original/graffle/apps-outside-k8s.graffle | Bin .../graffle/architecture-in-k8s.graffle | Bin .../original/graffle/architecture-r.graffle | Bin .../original/graffle/architecture-rw.graffle | Bin .../network-storage-architecture.graffle | Bin ...d-architecture-storage-replication.graffle | Bin .../graffle/public-cloud-architecture.graffle | Bin .../shared-nothing-architecture.graffle | Bin temp_kubernetes/original/mkdocs.yml | 81 +- temp_kubernetes/original/src/api_reference.md | 6 +- temp_kubernetes/original/src/architecture.md | 237 +- .../original/src/backup_recovery.md | 838 +++--- .../original/src/before_you_start.md | 90 +- temp_kubernetes/original/src/bootstrap.md | 608 ++--- temp_kubernetes/original/src/cloud_setup.md | 220 +- temp_kubernetes/original/src/cnp-plugin.md | 140 + .../original/src/container_images.md | 106 +- temp_kubernetes/original/src/credits.md | 32 +- temp_kubernetes/original/src/e2e.md | 104 +- temp_kubernetes/original/src/evaluation.md | 0 .../original/src/expose_pg_services.md | 286 +- temp_kubernetes/original/src/failure_modes.md | 308 ++- .../original/src/images/apps-in-k8s.png | 0 .../original/src/images/apps-outside-k8s.png | 0 .../src/images/architecture-in-k8s.png | 0 .../original/src/images/architecture-r.png | 0 .../original/src/images/architecture-rw.png | 0 .../images/network-storage-architecture.png | 0 .../src/images/operator-capability-level.png | 0 ...cloud-architecture-storage-replication.png | 0 .../src/images/public-cloud-architecture.png | 0 .../images/shared-nothing-architecture.png | 0 temp_kubernetes/original/src/index.md | 158 +- temp_kubernetes/original/src/installation.md | 156 +- .../original/src/kubernetes_upgrade.md | 172 +- temp_kubernetes/original/src/license_keys.md | 74 +- 
.../src/operator_capability_levels.md | 792 +++--- .../original/src/postgresql_conf.md | 454 ++-- temp_kubernetes/original/src/quickstart.md | 342 +-- .../original/src/rolling_update.md | 84 +- temp_kubernetes/original/src/samples.md | 0 .../original/src/samples/backup-example.yaml | 0 .../src/samples/cluster-example-custom.yaml | 53 +- .../src/samples/cluster-example-epas.yaml | 0 .../src/samples/cluster-example-full.yaml | 208 +- .../src/samples/cluster-example-initdb.yaml | 0 .../src/samples/cluster-example-secret.yaml | 0 .../samples/cluster-example-syncreplicas.yaml | 0 .../original/src/samples/cluster-example.yaml | 0 .../src/samples/cluster-expose-service.yaml | 72 +- .../src/samples/cluster-pvc-template.yaml | 0 .../src/samples/cluster-restore-pitr.yaml | 0 .../original/src/samples/cluster-restore.yaml | 0 .../cluster-storage-class-with-backup.yaml | 0 .../src/samples/cluster-storage-class.yaml | 0 .../samples/postgresql-operator-0.3.0.yaml | 897 ------- .../samples/postgresql-operator-0.4.0.yaml | 1092 -------- .../samples/postgresql-operator-0.5.0.yaml | 1144 -------- .../samples/postgresql-operator-0.6.0.yaml | 1247 --------- .../samples/postgresql-operator-0.7.0.yaml | 1262 --------- .../samples/postgresql-operator-0.8.0.yaml | 2362 ----------------- .../samples/postgresql-operator-1.0.0.yaml | 2362 ----------------- .../src/samples/scheduled-backup-example.yaml | 0 .../original/src/samples/subscription.yaml | 20 +- temp_kubernetes/original/src/security.md | 275 +- .../original/src/ssl_connections.md | 0 temp_kubernetes/original/src/storage.md | 0 temp_kubernetes/original/src/use_cases.md | 0 71 files changed, 3085 insertions(+), 13221 deletions(-) create mode 100644 temp_kubernetes/original/.gitignore create mode 100644 temp_kubernetes/original/README.md mode change 100755 => 100644 temp_kubernetes/original/graffle/apps-in-k8s.graffle mode change 100755 => 100644 temp_kubernetes/original/graffle/apps-outside-k8s.graffle mode change 100755 => 100644 
temp_kubernetes/original/graffle/architecture-in-k8s.graffle mode change 100755 => 100644 temp_kubernetes/original/graffle/architecture-r.graffle mode change 100755 => 100644 temp_kubernetes/original/graffle/architecture-rw.graffle mode change 100755 => 100644 temp_kubernetes/original/graffle/network-storage-architecture.graffle mode change 100755 => 100644 temp_kubernetes/original/graffle/public-cloud-architecture-storage-replication.graffle mode change 100755 => 100644 temp_kubernetes/original/graffle/public-cloud-architecture.graffle mode change 100755 => 100644 temp_kubernetes/original/graffle/shared-nothing-architecture.graffle mode change 100755 => 100644 temp_kubernetes/original/mkdocs.yml mode change 100755 => 100644 temp_kubernetes/original/src/api_reference.md mode change 100755 => 100644 temp_kubernetes/original/src/architecture.md mode change 100755 => 100644 temp_kubernetes/original/src/backup_recovery.md mode change 100755 => 100644 temp_kubernetes/original/src/before_you_start.md mode change 100755 => 100644 temp_kubernetes/original/src/bootstrap.md mode change 100755 => 100644 temp_kubernetes/original/src/cloud_setup.md create mode 100644 temp_kubernetes/original/src/cnp-plugin.md mode change 100755 => 100644 temp_kubernetes/original/src/container_images.md mode change 100755 => 100644 temp_kubernetes/original/src/credits.md mode change 100755 => 100644 temp_kubernetes/original/src/e2e.md mode change 100755 => 100644 temp_kubernetes/original/src/evaluation.md mode change 100755 => 100644 temp_kubernetes/original/src/expose_pg_services.md mode change 100755 => 100644 temp_kubernetes/original/src/failure_modes.md mode change 100755 => 100644 temp_kubernetes/original/src/images/apps-in-k8s.png mode change 100755 => 100644 temp_kubernetes/original/src/images/apps-outside-k8s.png mode change 100755 => 100644 temp_kubernetes/original/src/images/architecture-in-k8s.png mode change 100755 => 100644 temp_kubernetes/original/src/images/architecture-r.png mode 
change 100755 => 100644 temp_kubernetes/original/src/images/architecture-rw.png mode change 100755 => 100644 temp_kubernetes/original/src/images/network-storage-architecture.png mode change 100755 => 100644 temp_kubernetes/original/src/images/operator-capability-level.png mode change 100755 => 100644 temp_kubernetes/original/src/images/public-cloud-architecture-storage-replication.png mode change 100755 => 100644 temp_kubernetes/original/src/images/public-cloud-architecture.png mode change 100755 => 100644 temp_kubernetes/original/src/images/shared-nothing-architecture.png mode change 100755 => 100644 temp_kubernetes/original/src/index.md mode change 100755 => 100644 temp_kubernetes/original/src/installation.md mode change 100755 => 100644 temp_kubernetes/original/src/kubernetes_upgrade.md mode change 100755 => 100644 temp_kubernetes/original/src/license_keys.md mode change 100755 => 100644 temp_kubernetes/original/src/operator_capability_levels.md mode change 100755 => 100644 temp_kubernetes/original/src/postgresql_conf.md mode change 100755 => 100644 temp_kubernetes/original/src/quickstart.md mode change 100755 => 100644 temp_kubernetes/original/src/rolling_update.md mode change 100755 => 100644 temp_kubernetes/original/src/samples.md mode change 100755 => 100644 temp_kubernetes/original/src/samples/backup-example.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-example-custom.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-example-epas.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-example-full.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-example-initdb.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-example-secret.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-example-syncreplicas.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-example.yaml 
mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-expose-service.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-pvc-template.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-restore-pitr.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-restore.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-storage-class-with-backup.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/cluster-storage-class.yaml delete mode 100755 temp_kubernetes/original/src/samples/postgresql-operator-0.3.0.yaml delete mode 100755 temp_kubernetes/original/src/samples/postgresql-operator-0.4.0.yaml delete mode 100755 temp_kubernetes/original/src/samples/postgresql-operator-0.5.0.yaml delete mode 100755 temp_kubernetes/original/src/samples/postgresql-operator-0.6.0.yaml delete mode 100755 temp_kubernetes/original/src/samples/postgresql-operator-0.7.0.yaml delete mode 100755 temp_kubernetes/original/src/samples/postgresql-operator-0.8.0.yaml delete mode 100755 temp_kubernetes/original/src/samples/postgresql-operator-1.0.0.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/scheduled-backup-example.yaml mode change 100755 => 100644 temp_kubernetes/original/src/samples/subscription.yaml mode change 100755 => 100644 temp_kubernetes/original/src/security.md mode change 100755 => 100644 temp_kubernetes/original/src/ssl_connections.md mode change 100755 => 100644 temp_kubernetes/original/src/storage.md mode change 100755 => 100644 temp_kubernetes/original/src/use_cases.md diff --git a/temp_kubernetes/original/.gitignore b/temp_kubernetes/original/.gitignore new file mode 100644 index 00000000000..00757ba76fb --- /dev/null +++ b/temp_kubernetes/original/.gitignore @@ -0,0 +1,2 @@ +/site/ +/dist/ diff --git a/temp_kubernetes/original/README.md b/temp_kubernetes/original/README.md new file mode 100644 index 
00000000000..2f23adf8ada --- /dev/null +++ b/temp_kubernetes/original/README.md @@ -0,0 +1,22 @@ +# Cloud Native PostgreSQL documentation + +The documentation is compiled using [MkDocs](https://www.mkdocs.org/) + +Run the following command to build the documentation +in the `dist` directory: + +``` bash +docker run --rm -v "$(pwd):$(pwd)" -w "$(pwd)" \ + quay.io/enterprisedb/cloud-native-docutils:mkdocs \ + mkdocs build -v -d dist +``` + +You can locally test the documentation by executing +the following command and pointing your browser +to http://127.0.0.1:8000/ + +``` bash +docker run --rm -v "$(pwd):$(pwd)" -w "$(pwd)" -p 8000:8000 \ + quay.io/enterprisedb/cloud-native-docutils:mkdocs \ + mkdocs serve -a 0.0.0.0:8000 +``` diff --git a/temp_kubernetes/original/graffle/apps-in-k8s.graffle b/temp_kubernetes/original/graffle/apps-in-k8s.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/graffle/apps-outside-k8s.graffle b/temp_kubernetes/original/graffle/apps-outside-k8s.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/graffle/architecture-in-k8s.graffle b/temp_kubernetes/original/graffle/architecture-in-k8s.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/graffle/architecture-r.graffle b/temp_kubernetes/original/graffle/architecture-r.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/graffle/architecture-rw.graffle b/temp_kubernetes/original/graffle/architecture-rw.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/graffle/network-storage-architecture.graffle b/temp_kubernetes/original/graffle/network-storage-architecture.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/graffle/public-cloud-architecture-storage-replication.graffle b/temp_kubernetes/original/graffle/public-cloud-architecture-storage-replication.graffle old mode 100755 new mode 100644 diff --git 
a/temp_kubernetes/original/graffle/public-cloud-architecture.graffle b/temp_kubernetes/original/graffle/public-cloud-architecture.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/graffle/shared-nothing-architecture.graffle b/temp_kubernetes/original/graffle/shared-nothing-architecture.graffle old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/mkdocs.yml b/temp_kubernetes/original/mkdocs.yml old mode 100755 new mode 100644 index 3da7e3fb837..a33c4321580 --- a/temp_kubernetes/original/mkdocs.yml +++ b/temp_kubernetes/original/mkdocs.yml @@ -1,44 +1,37 @@ -# Documentation produced with MkDocs (https://www.mkdocs.org/) -# You can use a pyenv virtual environment with PIP's requirements -# defined in the `requirements.txt` file. -# -# Type `mkdocs build` to build the documentation in the `site` directory. -# You can test the documentation by typing `mkdocs serve` and point your -# browser to http://127.0.0.1:8000/. - -site_name: Cloud Native PostgreSQL -site_author: EnterpriseDB Corporation -docs_dir: src - -theme: readthedocs - -markdown_extensions: - - admonition - - def_list - -nav: - - index.md - - before_you_start.md - - evaluation.md - - use_cases.md - - architecture.md - - installation.md - - quickstart.md - - cloud_setup.md - - bootstrap.md - - security.md - - failure_modes.md - - rolling_update.md - - backup_recovery.md - - postgresql_conf.md - - storage.md - - samples.md - - expose_pg_services.md - - ssl_connections.md - - kubernetes_upgrade.md - - e2e.md - - license_keys.md - - container_images.md - - operator_capability_levels.md - - api_reference.md - - credits.md +site_name: Cloud Native PostgreSQL +site_author: EnterpriseDB Corporation +docs_dir: src + +theme: readthedocs + +markdown_extensions: + - admonition + - def_list + +nav: + - index.md + - before_you_start.md + - evaluation.md + - use_cases.md + - architecture.md + - installation.md + - quickstart.md + - cloud_setup.md + - bootstrap.md + - security.md 
+ - failure_modes.md + - rolling_update.md + - backup_recovery.md + - postgresql_conf.md + - storage.md + - samples.md + - expose_pg_services.md + - ssl_connections.md + - kubernetes_upgrade.md + - e2e.md + - cnp-plugin.md + - license_keys.md + - container_images.md + - operator_capability_levels.md + - api_reference.md + - credits.md diff --git a/temp_kubernetes/original/src/api_reference.md b/temp_kubernetes/original/src/api_reference.md old mode 100755 new mode 100644 index 583e0cbdd0a..cb67f3d4808 --- a/temp_kubernetes/original/src/api_reference.md +++ b/temp_kubernetes/original/src/api_reference.md @@ -62,7 +62,7 @@ BackupList contains a list of Backup | Field | Description | Scheme | Required | | -------------------- | ------------------------------ | -------------------- | -------- | | metadata | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#listmeta-v1-meta) | false | -| items | List of backups | \[][Backup](#backup) | true | +| items | List of backups | [][Backup](#backup) | true | ## BackupSpec @@ -179,7 +179,7 @@ ClusterList contains a list of Cluster | Field | Description | Scheme | Required | | -------------------- | ------------------------------ | -------------------- | -------- | | metadata | Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#listmeta-v1-meta) | false | -| items | List of clusters | \[][Cluster](#cluster) | true | +| items | List of clusters | [][Cluster](#cluster) | true | ## ClusterSpec @@ -341,7 +341,7 @@ ScheduledBackupList contains a list of ScheduledBackup | Field | Description | Scheme | Required | | -------------------- | ------------------------------ | -------------------- | -------- | | metadata | Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#listmeta-v1-meta) | false | -| items | List of clusters | \[][ScheduledBackup](#scheduledbackup) | true | +| items | List of clusters | [][ScheduledBackup](#scheduledbackup) | true | ## ScheduledBackupSpec diff --git a/temp_kubernetes/original/src/architecture.md b/temp_kubernetes/original/src/architecture.md old mode 100755 new mode 100644 index bb070c96c7f..bcd2f7ba6f8 --- a/temp_kubernetes/original/src/architecture.md +++ b/temp_kubernetes/original/src/architecture.md @@ -1,114 +1,123 @@ -# Architecture - -For High Availability goals, the PostgreSQL database management system provides administrators with built-in **physical replication** capabilities based on **Write Ahead Log (WAL) shipping**. - -PostgreSQL supports both asynchronous and synchronous streaming replication, as well as asynchronous file-based log shipping (normally used as a fallback option, for example, to store WAL files in an object store). Replicas are usually called *standby servers* and can also be used for read-only workloads, thanks to the *Hot Standby* feature. 
- -Cloud Native PostgreSQL currently supports clusters based on asynchronous and synchronous streaming replication to manage multiple hot standby replicas, with the following specifications: - -* One primary, with optional multiple hot standby replicas for High Availability -* Available services for applications: - * `-rw`: applications connect to the only primary instance of the cluster - * `-r`: applications connect to any of the instances for read-only workloads -* Shared-nothing architecture recommended for better resilience of the PostgreSQL cluster: - * PostgreSQL instances should reside on different Kubernetes worker nodes and share only the network - * PostgreSQL instances can reside in different availability zones in the same region - * All nodes of a PostgreSQL cluster should reside in the same region - -## Read-write workloads - -Applications can decide to connect to the PostgreSQL instance elected as *current primary* -by the Kubernetes operator, as depicted in the following diagram: - -![Applications writing to the single primary](./images/architecture-rw.png) - -Applications can use the `-rw` suffix service. - -In case of temporary or permanent unavailability of the primary, Kubernetes -will move the `-rw` to another instance of the cluster for high availability -purposes. - -## Read-only workloads - -!!! Important - Applications must be aware of the limitations that [Hot Standby](https://www.postgresql.org/docs/current/hot-standby.html) - presents and familiar with the way PostgreSQL operates when dealing with these workloads. - -Applications can access any PostgreSQL instance at any time through the `-r` -service made available by the operator at connection time. 
- -The following diagram shows the architecture: - -![Applications reading from any instance in round robin](./images/architecture-r.png) - -## Application deployments - -Applications are supposed to work with the services created by Cloud Native PostgreSQL -in the same Kubernetes cluster: - -* `[cluster name]-rw` -* `[cluster name]-r` - -Those services are entirely managed by the Kubernetes cluster and -implement a form of Virtual IP as described in the -["Service" page of the Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies). - -!!! Hint - It is highly recommended to use those services in your applications, - and avoid connecting directly to a specific PostgreSQL instance, as the latter - can change during the cluster lifetime. - -You can use these services in your applications through: - -* DNS resolution -* environment variables - -As far as the credentials to connect to PostgreSQL are concerned, you can -use the secrets generated by the operator. - -!!! Warning - The operator will create another service, named `[cluster name]-any`. That - service is used internally to manage PostgreSQL instance discovery. - It's not supposed to be used directly by applications. - -## DNS resolution - -You can use the Kubernetes DNS service, which is required by this operator, -to point to a given server. -You can do that by just using the name of the service if the application is -deployed in the same namespace as the PostgreSQL cluster. -In case the PostgreSQL cluster resides in a different namespace, you can use the -full qualifier: `service-name.namespace-name`. - -DNS is the preferred and recommended discovery method. - -## Environment variables - -If you deploy your application in the same namespace that contains the -PostgreSQL cluster, you can also use environment variables to connect to the database. 
- -For example, suppose that your PostgreSQL cluster is called `pg-database`, -you can use the following environment variables in your applications: - -* `PG_DATABASE_R_SERVICE_HOST`: the IP address of the service - pointing to all the PostgreSQL instances for read-only workloads - -* `PG_DATABASE_RW_SERVICE_HOST`: the IP address of the - service pointing to the *primary* instance of the cluster - -## Secrets - -The PostgreSQL operator will generate two secrets for every PostgreSQL cluster -it deploys: - -* `[cluster name]-superuser` -* `[cluster name]-app` - -The secrets contain the username, password, and a working -[`.pgpass file`](https://www.postgresql.org/docs/current/libpq-pgpass.html) -respectively for the `postgres` user and the *owner* of the database. - -The `-app` credentials are the ones that should be used by applications -connecting to the PostgreSQL cluster. - -The `-superuser` ones are supposed to be used only for administrative purposes. +# Architecture + +For High Availability goals, the PostgreSQL database management system provides administrators with built-in **physical replication** capabilities based on **Write Ahead Log (WAL) shipping**. + +PostgreSQL supports both asynchronous and synchronous streaming replication, as well as asynchronous file-based log shipping (normally used as a fallback option, for example, to store WAL files in an object store). Replicas are usually called *standby servers* and can also be used for read-only workloads, thanks to the *Hot Standby* feature. 
+ +Cloud Native PostgreSQL currently supports clusters based on asynchronous and synchronous streaming replication to manage multiple hot standby replicas, with the following specifications: + +* One primary, with optional multiple hot standby replicas for High Availability +* Available services for applications: + * `-rw`: applications connect to the only primary instance of the cluster + * `-ro`: applications connect to the only hot standby replicas for read-only-workloads + * `-r`: applications connect to any of the instances for read-only workloads +* Shared-nothing architecture recommended for better resilience of the PostgreSQL cluster: + * PostgreSQL instances should reside on different Kubernetes worker nodes and share only the network + * PostgreSQL instances can reside in different availability zones in the same region + * All nodes of a PostgreSQL cluster should reside in the same region + +## Read-write workloads + +Applications can decide to connect to the PostgreSQL instance elected as *current primary* +by the Kubernetes operator, as depicted in the following diagram: + +![Applications writing to the single primary](./images/architecture-rw.png) + +Applications can use the `-rw` suffix service. + +In case of temporary or permanent unavailability of the primary, Kubernetes +will move the `-rw` to another instance of the cluster for high availability +purposes. + +## Read-only workloads + +!!! Important + Applications must be aware of the limitations that [Hot Standby](https://www.postgresql.org/docs/current/hot-standby.html) + presents and familiar with the way PostgreSQL operates when dealing with these workloads. + +Applications can access any PostgreSQL instance at any time through the `-r` +service made available by the operator at connection time. 
+ +The following diagram shows the architecture: + +![Applications reading from any instance in round robin](./images/architecture-r.png) + +Applications can also access hot standby replicas through the `-ro` service made available +by the operator. This service enables the application to offload read-only queries from the +primary node. + +## Application deployments + +Applications are supposed to work with the services created by Cloud Native PostgreSQL +in the same Kubernetes cluster: + +* `[cluster name]-rw` +* `[cluster name]-ro` +* `[cluster name]-r` + +Those services are entirely managed by the Kubernetes cluster and +implement a form of Virtual IP as described in the +["Service" page of the Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies). + +!!! Hint + It is highly recommended to use those services in your applications, + and avoid connecting directly to a specific PostgreSQL instance, as the latter + can change during the cluster lifetime. + +You can use these services in your applications through: + +* DNS resolution +* environment variables + +As far as the credentials to connect to PostgreSQL are concerned, you can +use the secrets generated by the operator. + +!!! Warning + The operator will create another service, named `[cluster name]-any`. That + service is used internally to manage PostgreSQL instance discovery. + It's not supposed to be used directly by applications. + +## DNS resolution + +You can use the Kubernetes DNS service, which is required by this operator, +to point to a given server. +You can do that by just using the name of the service if the application is +deployed in the same namespace as the PostgreSQL cluster. +In case the PostgreSQL cluster resides in a different namespace, you can use the +full qualifier: `service-name.namespace-name`. + +DNS is the preferred and recommended discovery method. 
+ +## Environment variables + +If you deploy your application in the same namespace that contains the +PostgreSQL cluster, you can also use environment variables to connect to the database. + +For example, suppose that your PostgreSQL cluster is called `pg-database`, +you can use the following environment variables in your applications: + +* `PG_DATABASE_R_SERVICE_HOST`: the IP address of the service + pointing to all the PostgreSQL instances for read-only workloads + +* `PG_DATABASE_RO_SERVICE_HOST`: the IP address of the + service pointing to all hot-standby replicas of the cluster + +* `PG_DATABASE_RW_SERVICE_HOST`: the IP address of the + service pointing to the *primary* instance of the cluster + +## Secrets + +The PostgreSQL operator will generate two secrets for every PostgreSQL cluster +it deploys: + +* `[cluster name]-superuser` +* `[cluster name]-app` + +The secrets contain the username, password, and a working +[`.pgpass file`](https://www.postgresql.org/docs/current/libpq-pgpass.html) +respectively for the `postgres` user and the *owner* of the database. + +The `-app` credentials are the ones that should be used by applications +connecting to the PostgreSQL cluster. + +The `-superuser` ones are supposed to be used only for administrative purposes. diff --git a/temp_kubernetes/original/src/backup_recovery.md b/temp_kubernetes/original/src/backup_recovery.md old mode 100755 new mode 100644 index b474efa9c2b..4491e9f1643 --- a/temp_kubernetes/original/src/backup_recovery.md +++ b/temp_kubernetes/original/src/backup_recovery.md @@ -1,419 +1,419 @@ -# Backup and Recovery - -The operator can orchestrate a continuous backup infrastructure -that is based on the [Barman](https://pgbarman.org) tool. Instead -of using the classical architecture with a Barman server, which -backup many PostgreSQL instances, the operator will use the -`barman-cloud-wal-archive` and `barman-cloud-backup` tools. -As a result, base backups will be *tarballs*. 
Both base backups and WAL files -can be compressed and encrypted. - -For this, it is required an image with `barman-cli-cloud` installed. -You can use the image `quay.io/enterprisedb/postgresql` for this scope, -as it is composed of a community PostgreSQL image and the latest -`barman-cli-cloud` package. - -## Cloud credentials - -You can archive the backup files in any service whose API is compatible -with AWS S3. You will need the following information about your -environment: - -- `ACCESS_KEY_ID`: the ID of the access key that will be used - to upload files in S3 - -- `ACCESS_SECRET_KEY`: the secret part of the previous access key - -- `ACCESS_SESSION_TOKEN`: the optional session token in case it is required - -The access key used must have permission to upload files in -the bucket. Given that, you must create a k8s secret with the -credentials, and you can do that with the following command: - -```sh -kubectl create secret generic aws-creds \ - --from-literal=ACCESS_KEY_ID= \ - --from-literal=ACCESS_SECRET_KEY= -# --from-literal=ACCESS_SESSION_TOKEN= # if required -``` - -The credentials will be stored inside Kubernetes and will be encrypted -if encryption at rest is configured in your installation. - -## Configuring the Cluster - -### S3 - -Given that secret, you can configure your cluster like in -the following example: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - destinationPath: "" - s3Credentials: - accessKeyId: - name: aws-creds - key: ACCESS_KEY_ID - secretAccessKey: - name: aws-creds - key: ACCESS_SECRET_KEY -``` - -The destination path can be every URL pointing to a folder where -the instance can upload the WAL files, e.g. -`s3://BUCKET_NAME/path/to/folder`. - -### Other S3-compatible Object Storages providers - -In case you're using S3-compatible object storage, like MinIO or -Linode Object Storage, you can specify an endpoint instead of using the -default S3 one. 
- -In this example, it will use the `bucket` bucket of Linode in the region -`us-east1`. - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - destinationPath: "" - endpointURL: bucket.us-east1.linodeobjects.com - s3Credentials: - [...] -``` - -### MinIO Gateway - -Optionally, you can use MinIO Gateway as a common interface which -relays backup objects to other cloud storage solutions, like S3, GCS or -Azure. For more information, please refer to [MinIO official documentation](https://docs.min.io/). - -Specifically, the Cloud Native PostgreSQL cluster can directly point to a local -MinIO Gateway as an endpoint, using previously created credentials and service. - -MinIO secrets will be used by both the PostgreSQL cluster and the MinIO instance. -Therefore you must create them in the same namespace: - -```sh -kubectl create secret generic minio-creds \ - --from-literal=MINIO_ACCESS_KEY= \ - --from-literal=MINIO_SECRET_KEY= -``` - -!!! NOTE "Note" - Cloud Object Storage credentials will be used only by MinIO Gateway in this case. - -!!! Important - In order to allow PostgreSQL to reach MinIO Gateway, it is necessary to create a - `ClusterIP` service on port `9000` bound to the MinIO Gateway instance. - -For example: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: minio-gateway-service -spec: - type: ClusterIP - ports: - - port: 9000 - targetPort: 9000 - protocol: TCP - selector: - app: minio -``` - -!!! Warning - At the time of writing this documentation, the official [MinIO Operator](https://github.com/minio/minio-operator/issues/71) - for Kubernetes does not support the gateway feature. As such, we will use a - `deployment` instead. - -The MinIO deployment will use cloud storage credentials to upload objects to the -remote bucket and relay backup files to different locations. 
- -Here is an example using AWS S3 as Cloud Object Storage: - -```yaml -apiVersion: apps/v1 -kind: Deployment -[...] - spec: - containers: - - name: minio - image: minio/minio:RELEASE.2020-06-03T22-13-49Z - args: - - gateway - - s3 - env: - # MinIO access key and secret key - - name: MINIO_ACCESS_KEY - valueFrom: - secretKeyRef: - name: minio-creds - key: MINIO_ACCESS_KEY - - name: MINIO_SECRET_KEY - valueFrom: - secretKeyRef: - name: minio-creds - key: MINIO_SECRET_KEY - # AWS credentials - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: aws-creds - key: ACCESS_KEY_ID - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: aws-creds - key: ACCESS_SECRET_KEY -# Uncomment the below section if session token is required -# - name: AWS_SESSION_TOKEN -# valueFrom: -# secretKeyRef: -# name: aws-creds -# key: ACCESS_SESSION_TOKEN - ports: - - containerPort: 9000 -``` - -Proceed by configuring MinIO Gateway service as the `endpointURL` in the `Cluster` -definition, then choose a bucket name to replace `BUCKET_NAME`: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - destinationPath: s3://BUCKET_NAME/ - endpointURL: http://minio-gateway-service:9000 - s3Credentials: - accessKeyId: - name: minio-creds - key: MINIO_ACCESS_KEY - secretAccessKey: - name: minio-creds - key: MINIO_SECRET_KEY - [...] -``` - -Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before -proceeding with a backup. - -## On-demand backups - -To request a new backup, you need to create a new Backup resource -like the following one: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Backup -metadata: - name: backup-example -spec: - cluster: - name: pg-backup -``` - -The operator will start to orchestrate the cluster to take the -required backup using `barman-cloud-backup`. 
You can check -the backup status using the plain `kubectl describe backup ` -command: - -```text -Name: backup-example -Namespace: default -Labels: -Annotations: API Version: postgresql.k8s.enterprisedb.io/v1 -Kind: Backup -Metadata: - Creation Timestamp: 2020-10-26T13:57:40Z - Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example - UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 -Spec: - Cluster: - Name: pg-backup -Status: - Phase: running - Started At: 2020-10-26T13:57:40Z -Events: -``` - -When the backup has been completed, the phase will be `completed` -like in the following example: - -```text -Name: backup-example -Namespace: default -Labels: -Annotations: API Version: postgresql.k8s.enterprisedb.io/v1 -Kind: Backup -Metadata: - Creation Timestamp: 2020-10-26T13:57:40Z - Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example - UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 -Spec: - Cluster: - Name: pg-backup -Status: - Backup Id: 20201026T135740 - Destination Path: s3://backups/ - Endpoint URL: http://minio:9000 - Phase: completed - s3Credentials: - Access Key Id: - Key: ACCESS_KEY_ID - Name: minio - Secret Access Key: - Key: ACCESS_SECRET_KEY - Name: minio - Server Name: pg-backup - Started At: 2020-10-26T13:57:40Z - Stopped At: 2020-10-26T13:57:44Z -Events: -``` - -!!!Important - This feature will not backup the secrets for the superuser and the - application user. The secrets are supposed to be backed up as part of - the standard backup procedures for the Kubernetes cluster. - -## Scheduled backups - -You can also schedule your backups periodically by creating a -resource named `ScheduledBackup`. The latter is similar to a -`Backup` but with an added field, called `schedule`. - -This field is a [Cron](https://en.wikipedia.org/wiki/Cron) schedule -specification with a prepended field for seconds. This schedule format -is the same used in Kubernetes CronJobs. 
- -This is an example of a scheduled backup: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: ScheduledBackup -metadata: - name: backup-example -spec: - schedule: "0 0 0 * * *" - cluster: - name: pg-backup -``` - -The proposed specification will schedule a backup every day at midnight. - -## WAL archiving - -WAL archiving is enabled as soon as you choose a destination path -and you configure your cloud credentials. - -If required, you can choose to compress WAL files as soon as they -are uploaded and/or encrypt them: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - [...] - wal: - compression: gzip - encryption: AES256 -``` - -You can configure the encryption directly in your bucket, and the operator -will use it unless you override it in the cluster configuration. - -## Recovery - -You can use the data uploaded to the object storage to bootstrap a -new cluster from a backup. The operator will orchestrate the recovery -process using the `barman-cloud-restore` tool. 
- -When a backup is completed, the corresponding Kubernetes resource will -contain every information needed to restore it, just like in the -following example: - -```text -Name: backup-example -Namespace: default -Labels: -Annotations: API Version: postgresql.k8s.enterprisedb.io/v1 -Kind: Backup -Metadata: - Creation Timestamp: 2020-10-26T13:57:40Z - Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example - UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 -Spec: - Cluster: - Name: pg-backup -Status: - Backup Id: 20201026T135740 - Destination Path: s3://backups/ - Endpoint URL: http://minio:9000 - Phase: completed - s3Credentials: - Access Key Id: - Key: ACCESS_KEY_ID - Name: minio - Secret Access Key: - Key: ACCESS_SECRET_KEY - Name: minio - Server Name: pg-backup - Started At: 2020-10-26T13:57:40Z - Stopped At: 2020-10-26T13:57:44Z -Events: -``` - -Given the following cluster definition: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-restore -spec: - instances: 3 - - storage: - size: 5Gi - - bootstrap: - recovery: - backup: - name: backup-example -``` - -The operator will inject an init container in the first instance of the -cluster and the init container will start recovering the backup from the -object storage. - -When the recovery process is completed, the operator will start the instance -to allow it to recover the transaction log files needed for the -consistency of the restored data directory. - -Once the recovery is complete, the operator will set the required -superuser password into the instance. The new primary instance will start -as usual, and the remaining instances will join the cluster as replicas. - -The process is transparent for the user and it is managed by the instance -manager running in the Pods. - -You can optionally specify a `recoveryTarget` to perform a point in time -recovery. 
If left unspecified, the recovery will continue up to the latest -available WAL on the default target timeline (`current` for PostgreSQL up to -11, `latest` for version 12 and above). +# Backup and Recovery + +The operator can orchestrate a continuous backup infrastructure +that is based on the [Barman](https://pgbarman.org) tool. Instead +of using the classical architecture with a Barman server, which +backup many PostgreSQL instances, the operator will use the +`barman-cloud-wal-archive` and `barman-cloud-backup` tools. +As a result, base backups will be *tarballs*. Both base backups and WAL files +can be compressed and encrypted. + +For this, it is required an image with `barman-cli-cloud` installed. +You can use the image `quay.io/enterprisedb/postgresql` for this scope, +as it is composed of a community PostgreSQL image and the latest +`barman-cli-cloud` package. + +## Cloud credentials + +You can archive the backup files in any service whose API is compatible +with AWS S3. You will need the following information about your +environment: + +- `ACCESS_KEY_ID`: the ID of the access key that will be used + to upload files in S3 + +- `ACCESS_SECRET_KEY`: the secret part of the previous access key + +- `ACCESS_SESSION_TOKEN`: the optional session token in case it is required + +The access key used must have permission to upload files in +the bucket. Given that, you must create a k8s secret with the +credentials, and you can do that with the following command: + +```sh +kubectl create secret generic aws-creds \ + --from-literal=ACCESS_KEY_ID= \ + --from-literal=ACCESS_SECRET_KEY= +# --from-literal=ACCESS_SESSION_TOKEN= # if required +``` + +The credentials will be stored inside Kubernetes and will be encrypted +if encryption at rest is configured in your installation. 
+ +## Configuring the Cluster + +### S3 + +Given that secret, you can configure your cluster like in +the following example: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +[...] +spec: + backup: + barmanObjectStore: + destinationPath: "" + s3Credentials: + accessKeyId: + name: aws-creds + key: ACCESS_KEY_ID + secretAccessKey: + name: aws-creds + key: ACCESS_SECRET_KEY +``` + +The destination path can be every URL pointing to a folder where +the instance can upload the WAL files, e.g. +`s3://BUCKET_NAME/path/to/folder`. + +### Other S3-compatible Object Storages providers + +In case you're using S3-compatible object storage, like MinIO or +Linode Object Storage, you can specify an endpoint instead of using the +default S3 one. + +In this example, it will use the `bucket` bucket of Linode in the region +`us-east1`. + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +[...] +spec: + backup: + barmanObjectStore: + destinationPath: "" + endpointURL: bucket.us-east1.linodeobjects.com + s3Credentials: + [...] +``` + +### MinIO Gateway + +Optionally, you can use MinIO Gateway as a common interface which +relays backup objects to other cloud storage solutions, like S3, GCS or +Azure. For more information, please refer to [MinIO official documentation](https://docs.min.io/). + +Specifically, the Cloud Native PostgreSQL cluster can directly point to a local +MinIO Gateway as an endpoint, using previously created credentials and service. + +MinIO secrets will be used by both the PostgreSQL cluster and the MinIO instance. +Therefore you must create them in the same namespace: + +```sh +kubectl create secret generic minio-creds \ + --from-literal=MINIO_ACCESS_KEY= \ + --from-literal=MINIO_SECRET_KEY= +``` + +!!! NOTE "Note" + Cloud Object Storage credentials will be used only by MinIO Gateway in this case. + +!!! 
Important + In order to allow PostgreSQL to reach MinIO Gateway, it is necessary to create a + `ClusterIP` service on port `9000` bound to the MinIO Gateway instance. + +For example: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: minio-gateway-service +spec: + type: ClusterIP + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + selector: + app: minio +``` + +!!! Warning + At the time of writing this documentation, the official [MinIO Operator](https://github.com/minio/minio-operator/issues/71) + for Kubernetes does not support the gateway feature. As such, we will use a + `deployment` instead. + +The MinIO deployment will use cloud storage credentials to upload objects to the +remote bucket and relay backup files to different locations. + +Here is an example using AWS S3 as Cloud Object Storage: + +```yaml +apiVersion: apps/v1 +kind: Deployment +[...] + spec: + containers: + - name: minio + image: minio/minio:RELEASE.2020-06-03T22-13-49Z + args: + - gateway + - s3 + env: + # MinIO access key and secret key + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: minio-creds + key: MINIO_ACCESS_KEY + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: minio-creds + key: MINIO_SECRET_KEY + # AWS credentials + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-creds + key: ACCESS_KEY_ID + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-creds + key: ACCESS_SECRET_KEY +# Uncomment the below section if session token is required +# - name: AWS_SESSION_TOKEN +# valueFrom: +# secretKeyRef: +# name: aws-creds +# key: ACCESS_SESSION_TOKEN + ports: + - containerPort: 9000 +``` + +Proceed by configuring MinIO Gateway service as the `endpointURL` in the `Cluster` +definition, then choose a bucket name to replace `BUCKET_NAME`: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +[...] 
+spec: + backup: + barmanObjectStore: + destinationPath: s3://BUCKET_NAME/ + endpointURL: http://minio-gateway-service:9000 + s3Credentials: + accessKeyId: + name: minio-creds + key: MINIO_ACCESS_KEY + secretAccessKey: + name: minio-creds + key: MINIO_SECRET_KEY + [...] +``` + +Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before +proceeding with a backup. + +## On-demand backups + +To request a new backup, you need to create a new Backup resource +like the following one: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Backup +metadata: + name: backup-example +spec: + cluster: + name: pg-backup +``` + +The operator will start to orchestrate the cluster to take the +required backup using `barman-cloud-backup`. You can check +the backup status using the plain `kubectl describe backup ` +command: + +```text +Name: backup-example +Namespace: default +Labels: +Annotations: API Version: postgresql.k8s.enterprisedb.io/v1 +Kind: Backup +Metadata: + Creation Timestamp: 2020-10-26T13:57:40Z + Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example + UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 +Spec: + Cluster: + Name: pg-backup +Status: + Phase: running + Started At: 2020-10-26T13:57:40Z +Events: +``` + +When the backup has been completed, the phase will be `completed` +like in the following example: + +```text +Name: backup-example +Namespace: default +Labels: +Annotations: API Version: postgresql.k8s.enterprisedb.io/v1 +Kind: Backup +Metadata: + Creation Timestamp: 2020-10-26T13:57:40Z + Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example + UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 +Spec: + Cluster: + Name: pg-backup +Status: + Backup Id: 20201026T135740 + Destination Path: s3://backups/ + Endpoint URL: http://minio:9000 + Phase: completed + s3Credentials: + Access Key Id: + Key: ACCESS_KEY_ID + Name: minio + Secret Access Key: + Key: ACCESS_SECRET_KEY + Name: 
minio + Server Name: pg-backup + Started At: 2020-10-26T13:57:40Z + Stopped At: 2020-10-26T13:57:44Z +Events: +``` + +!!!Important + This feature will not backup the secrets for the superuser and the + application user. The secrets are supposed to be backed up as part of + the standard backup procedures for the Kubernetes cluster. + +## Scheduled backups + +You can also schedule your backups periodically by creating a +resource named `ScheduledBackup`. The latter is similar to a +`Backup` but with an added field, called `schedule`. + +This field is a [Cron](https://en.wikipedia.org/wiki/Cron) schedule +specification with a prepended field for seconds. This schedule format +is the same used in Kubernetes CronJobs. + +This is an example of a scheduled backup: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: ScheduledBackup +metadata: + name: backup-example +spec: + schedule: "0 0 0 * * *" + cluster: + name: pg-backup +``` + +The proposed specification will schedule a backup every day at midnight. + +## WAL archiving + +WAL archiving is enabled as soon as you choose a destination path +and you configure your cloud credentials. + +If required, you can choose to compress WAL files as soon as they +are uploaded and/or encrypt them: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +[...] +spec: + backup: + barmanObjectStore: + [...] + wal: + compression: gzip + encryption: AES256 +``` + +You can configure the encryption directly in your bucket, and the operator +will use it unless you override it in the cluster configuration. + +## Recovery + +You can use the data uploaded to the object storage to bootstrap a +new cluster from a backup. The operator will orchestrate the recovery +process using the `barman-cloud-restore` tool. 
+ +When a backup is completed, the corresponding Kubernetes resource will +contain every information needed to restore it, just like in the +following example: + +```text +Name: backup-example +Namespace: default +Labels: +Annotations: API Version: postgresql.k8s.enterprisedb.io/v1 +Kind: Backup +Metadata: + Creation Timestamp: 2020-10-26T13:57:40Z + Self Link: /apis/postgresql.k8s.enterprisedb.io/v1/namespaces/default/backups/backup-example + UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 +Spec: + Cluster: + Name: pg-backup +Status: + Backup Id: 20201026T135740 + Destination Path: s3://backups/ + Endpoint URL: http://minio:9000 + Phase: completed + s3Credentials: + Access Key Id: + Key: ACCESS_KEY_ID + Name: minio + Secret Access Key: + Key: ACCESS_SECRET_KEY + Name: minio + Server Name: pg-backup + Started At: 2020-10-26T13:57:40Z + Stopped At: 2020-10-26T13:57:44Z +Events: +``` + +Given the following cluster definition: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-restore +spec: + instances: 3 + + storage: + size: 5Gi + + bootstrap: + recovery: + backup: + name: backup-example +``` + +The operator will inject an init container in the first instance of the +cluster and the init container will start recovering the backup from the +object storage. + +When the recovery process is completed, the operator will start the instance +to allow it to recover the transaction log files needed for the +consistency of the restored data directory. + +Once the recovery is complete, the operator will set the required +superuser password into the instance. The new primary instance will start +as usual, and the remaining instances will join the cluster as replicas. + +The process is transparent for the user and it is managed by the instance +manager running in the Pods. + +You can optionally specify a `recoveryTarget` to perform a point in time +recovery. 
If left unspecified, the recovery will continue up to the latest +available WAL on the default target timeline (`current` for PostgreSQL up to +11, `latest` for version 12 and above). diff --git a/temp_kubernetes/original/src/before_you_start.md b/temp_kubernetes/original/src/before_you_start.md old mode 100755 new mode 100644 index 4eddbfb22b9..4a43c516518 --- a/temp_kubernetes/original/src/before_you_start.md +++ b/temp_kubernetes/original/src/before_you_start.md @@ -1,45 +1,45 @@ -# Before You Start - -Before we get started, it is essential to go over some terminology that is -specific to Kubernetes and PostgreSQL. - -## Kubernetes terminology - -| Resource | Description | -|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) | A *node* is a worker machine in Kubernetes, either virtual or physical, where all services necessary to run pods are managed by the master(s). | -| [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) | A *pod* is the smallest computing unit that can be deployed in a Kubernetes cluster and is composed of one or more containers that share network and storage. 
| -| [Service](https://kubernetes.io/docs/concepts/services-networking/service/) | A *service* is an abstraction that exposes as a network service an application that runs on a group of pods and standardizes important features such as service discovery across applications, load balancing, failover, and so on. | -| [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) | A *secret* is an object that is designed to store small amounts of sensitive data such as passwords, access keys, or tokens, and use them in pods. | -| [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) | A *storage class* allows an administrator to define the classes of storage in a cluster, including provisioner (such as AWS EBS), reclaim policies, mount options, volume expansion, and so on. | -| [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) | A *persistent volume* (PV) is a resource in a Kubernetes cluster that represents storage that has been either manually provisioned by an administrator or dynamically provisioned by a *storage class* controller. A PV is associated with a pod using a *persistent volume claim* and its lifecycle is independent of any pod that uses it. Normally, a PV is a network volume, especially in the public cloud. A [*local persistent volume* (LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a persistent volume that exists only on the particular node where the pod that uses it is running. | -| [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) | A *persistent volume claim* (PVC) represents a request for storage, which might include size, access mode, or a particular storage class. Similar to how a pod consumes node resources, a PVC consumes the resources of a PV. 
| -| [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) | A *namespace* is a logical and isolated subset of a Kubernetes cluster and can be seen as a *virtual cluster* within the wider physical cluster. Namespaces allow administrators to create separated environments based on projects, departments, teams, and so on. | -| [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) | *Role Based Access Control* (RBAC), also known as *role-based security*, is a method used in computer systems security to restrict access to the network and resources of a system to authorized users only. Kubernetes has a native API to control roles at the namespace and cluster level and associate them with specific resources and individuals. | -| [CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) | A *custom resource definition* (CRD) is an extension of the Kubernetes API and allows developers to create new data types and objects, *called custom resources*. | -| [Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) | An *operator* is a custom resource that automates those steps that are normally performed by a human operator when managing one or more applications or given services. An operator assists Kubernetes in making sure that the resource's defined state always matches the observed one. | -| [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) | `kubectl` is the command-line tool used to manage a Kubernetes cluster. | - -Cloud Native PostgreSQL requires Kubernetes 1.16 or higher. 
- -## PostgreSQL terminology - -| Resource | Description | -|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Instance | A Postgres server process running and listening on a pair "IP address(es)" and "TCP port" (usually 5432). | -| Primary | A PostgreSQL instance that can accept both read and write operations. | -| Replica | A PostgreSQL instance replicating from the only primary instance in a cluster and is kept updated by reading a stream of Write-Ahead Log (WAL) records. A replica is also known as *standby* or *secondary* server. PostgreSQL relies on physical streaming replication (async/sync) and file-based log shipping (async). | -| Hot Standby | PostgreSQL feature that allows a *replica* to accept read-only workloads. | -| Cluster | To be intended as High Availability (HA) Cluster: a set of PostgreSQL instances made up by a single primary and an optional arbitrary number of replicas. 
| - -## Cloud terminology - -| Resource | Description | -|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Region | A *region* in the Cloud is an isolated and independent geographic area organized in *availability zones*. Zones within a region have very little round-trip network latency. | -| Zone | An *availability zone* in the Cloud (also known as *zone*) is an area in a region where resources can be deployed. Usually, an availability zone corresponds to a data center or an isolated building of the same data center. | - -## What to do next - -Now that you have familiarized with the terminology, you can decide to -[test Cloud Native PostgreSQL on your laptop using a local cluster](quickstart.md) before deploying the operator in your selected cloud environment. +# Before You Start + +Before we get started, it is essential to go over some terminology that is +specific to Kubernetes and PostgreSQL. 
+ +## Kubernetes terminology + +| Resource | Description | +|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) | A *node* is a worker machine in Kubernetes, either virtual or physical, where all services necessary to run pods are managed by the control plane node(s). | +| [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) | A *pod* is the smallest computing unit that can be deployed in a Kubernetes cluster and is composed of one or more containers that share network and storage. | +| [Service](https://kubernetes.io/docs/concepts/services-networking/service/) | A *service* is an abstraction that exposes as a network service an application that runs on a group of pods and standardizes important features such as service discovery across applications, load balancing, failover, and so on. | +| [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) | A *secret* is an object that is designed to store small amounts of sensitive data such as passwords, access keys, or tokens, and use them in pods. 
| +| [Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/) | A *storage class* allows an administrator to define the classes of storage in a cluster, including provisioner (such as AWS EBS), reclaim policies, mount options, volume expansion, and so on. | +| [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) | A *persistent volume* (PV) is a resource in a Kubernetes cluster that represents storage that has been either manually provisioned by an administrator or dynamically provisioned by a *storage class* controller. A PV is associated with a pod using a *persistent volume claim* and its lifecycle is independent of any pod that uses it. Normally, a PV is a network volume, especially in the public cloud. A [*local persistent volume* (LPV)](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a persistent volume that exists only on the particular node where the pod that uses it is running. | +| [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) | A *persistent volume claim* (PVC) represents a request for storage, which might include size, access mode, or a particular storage class. Similar to how a pod consumes node resources, a PVC consumes the resources of a PV. | +| [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) | A *namespace* is a logical and isolated subset of a Kubernetes cluster and can be seen as a *virtual cluster* within the wider physical cluster. Namespaces allow administrators to create separated environments based on projects, departments, teams, and so on. | +| [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) | *Role Based Access Control* (RBAC), also known as *role-based security*, is a method used in computer systems security to restrict access to the network and resources of a system to authorized users only. 
Kubernetes has a native API to control roles at the namespace and cluster level and associate them with specific resources and individuals. | +| [CRD](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) | A *custom resource definition* (CRD) is an extension of the Kubernetes API and allows developers to create new data types and objects, *called custom resources*. | +| [Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) | An *operator* is a custom resource that automates those steps that are normally performed by a human operator when managing one or more applications or given services. An operator assists Kubernetes in making sure that the resource's defined state always matches the observed one. | +| [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) | `kubectl` is the command-line tool used to manage a Kubernetes cluster. | + +Cloud Native PostgreSQL requires Kubernetes 1.16 or higher. + +## PostgreSQL terminology + +| Resource | Description | +|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Instance | A Postgres server process running and listening on a pair "IP address(es)" and "TCP port" (usually 5432). | +| Primary | A PostgreSQL instance that can accept both read and write operations. 
| +| Replica | A PostgreSQL instance replicating from the only primary instance in a cluster and is kept updated by reading a stream of Write-Ahead Log (WAL) records. A replica is also known as *standby* or *secondary* server. PostgreSQL relies on physical streaming replication (async/sync) and file-based log shipping (async). | +| Hot Standby | PostgreSQL feature that allows a *replica* to accept read-only workloads. | +| Cluster | To be intended as High Availability (HA) Cluster: a set of PostgreSQL instances made up by a single primary and an optional arbitrary number of replicas. | + +## Cloud terminology + +| Resource | Description | +|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Region | A *region* in the Cloud is an isolated and independent geographic area organized in *availability zones*. Zones within a region have very little round-trip network latency. | +| Zone | An *availability zone* in the Cloud (also known as *zone*) is an area in a region where resources can be deployed. Usually, an availability zone corresponds to a data center or an isolated building of the same data center. 
| + +## What to do next + +Now that you have familiarized with the terminology, you can decide to +[test Cloud Native PostgreSQL on your laptop using a local cluster](quickstart.md) before deploying the operator in your selected cloud environment. diff --git a/temp_kubernetes/original/src/bootstrap.md b/temp_kubernetes/original/src/bootstrap.md old mode 100755 new mode 100644 index 3bfc47f8759..b2bc794ff5f --- a/temp_kubernetes/original/src/bootstrap.md +++ b/temp_kubernetes/original/src/bootstrap.md @@ -1,304 +1,304 @@ -# Bootstrap - -This section describes the options you have to create a new -PostgreSQL cluster and the design rationale behind them. - -When a PostgreSQL cluster is defined, you can configure the -*bootstrap* method using the `bootstrap` section of the cluster -specification. - -In the following example: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-initdb -spec: - instances: 3 - - bootstrap: - initdb: - database: appdb - owner: appuser - - storage: - size: 1Gi -``` - -The `initdb` bootstrap method is used. - -We currently support the following bootstrap methods: - -- `initdb`: initialise an empty PostgreSQL cluster -- `recovery`: create a PostgreSQL cluster restoring from an existing backup - and replaying all the available WAL files. - -## initdb - -The `initdb` bootstrap method is used to create a new PostgreSQL cluster from -scratch. It is the default one unless specified differently. - -The following example contains the full structure of the `initdb` configuration: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-initdb -spec: - instances: 3 - - superuserSecret: - name: superuser-secret - - bootstrap: - initdb: - database: appdb - owner: appuser - secret: - name: appuser-secret - - storage: - size: 1Gi -``` - -The above example of bootstrap will: - -1. 
create a new `PGDATA` folder using PostgreSQL's native `initdb` command -2. set a *superuser* password from the secret named `superuser-secret` -3. create an *unprivileged* user named `appuser` -4. set the password of the latter using the one in the `appuser-secret` secret -5. create a database called `appdb` owned by the `appuser` user. - -Thanks to the *convention over configuration paradigm*, you can let the -operator choose a default database name (`app`) and a default application -user name (same as the database name), as well as randomly generate a -secure password for both the superuser and the application user in -PostgreSQL. - -Alternatively, you can generate your passwords, store them as secrets, -and use them in the PostgreSQL cluster - as described in the above example. - -The supplied secrets must comply with the specifications of the -[`kubernetes.io/basic-auth` type](https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret). -The operator will only use the `password` field of the secret, -ignoring the `username` one. If you plan to reuse the secret for application -connections, you can set the `username` field to the same value as the `owner`. - -The following is an example of a `basic-auth` secret: - -```yaml -apiVersion: v1 -data: - password: cGFzc3dvcmQ= -kind: Secret -metadata: - name: cluster-example-app-user -type: kubernetes.io/basic-auth -``` - -The application database is the one that should be used to store application -data. Applications should connect to the cluster with the user that owns -the application database. - -!!! Important - Future implementations of the operator might allow you to create - additional users in a declarative configuration fashion. - -The superuser and the `postgres` database are supposed to be used only -by the operator to configure the cluster. 
- -In case you don't supply any database name, the operator will proceed -by convention and create the `app` database, and adds it to the cluster -definition using a *defaulting webhook*. -The user that owns the database defaults to the database name instead. - -The application user is not used internally by the operator, which instead -relies on the superuser to reconcile the cluster with the desired status. - -!!! Important - For now, changes to the name of the superuser secret are not applied - to the cluster. - -The actual PostgreSQL data directory is created via an invocation of the -`initdb` PostgreSQL command. If you need to add custom options to that -command (i.e., to change the locale used for the template databases or to -add data checksums), you can add them to the `options` section like in -the following example: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-initdb -spec: - instances: 3 - - bootstrap: - initdb: - database: appdb - owner: appuser - options: - - "-k" - - "--locale=en_US" - storage: - size: 1Gi -``` - -### Compatibility Features - -EDB Postgres Advanced adds many compatibility features to the -plain community PostgreSQL. You can find more information about that -in the [EDB Postgres Advanced](https://www.enterprisedb.com/edb-docs/p/edb-postgres-advanced-server). - -Those features are already enabled during cluster creation on EPAS and -are not supported on the community PostgreSQL image. To disable them -you can use the `redwood` flag in the `initdb` section -like in the following example: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-initdb -spec: - instances: 3 - imageName: - licenseKey: - - bootstrap: - initdb: - database: appdb - owner: appuser - redwood: false - storage: - size: 1Gi -``` - -!!! Important - EDB Postgres Advanced requires a valid license key (trial or production) to start. 
- -## recovery - -The `recovery` bootstrap mode lets you create a new cluster from -an existing backup. You can find more information about the recovery -feature in the ["Backup and recovery" page](backup_recovery.md). - -The following example contains the full structure of the `recovery` -section: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-initdb -spec: - instances: 3 - - superuserSecret: - name: superuser-secret - - bootstrap: - recovery: - backup: - name: backup-example - - storage: - size: 1Gi -``` - -This bootstrap method allows you to specify just a reference to the -backup that needs to be restored. - -The application database name and the application database user are preserved -from the backup that is being restored. The operator does not currently attempt -to backup the underlying secrets, as this is part of the usual maintenance -activity of the Kubernetes cluster itself. - -In case you don't supply any `superuserSecret`, a new one is automatically -generated with a secure and random password. The secret is then used to -reset the password for the `postgres` user of the cluster. - -By default, the recovery will continue up to the latest -available WAL on the default target timeline (`current` for PostgreSQL up to -11, `latest` for version 12 and above). -You can optionally specify a `recoveryTarget` to perform a point in time -recovery (see the ["Point in time recovery" chapter](#point-in-time-recovery)). - -### Point in time recovery - -Instead of replaying all the WALs up to the latest one, -we can ask PostgreSQL to stop replaying WALs at any given point in time. -PostgreSQL uses this technique to implement *point-in-time* recovery. -This allows you to restore the database to its state at any time after -the base backup was taken. 
- -The operator will generate the configuration parameters required for this -feature to work if a recovery target is specified like in the following -example: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-restore-pitr -spec: - instances: 3 - - storage: - size: 5Gi - - bootstrap: - recovery: - backup: - name: backup-example - - recoveryTarget: - targetTime: "2020-11-26 15:22:00.00000+00" -``` - -Beside `targetTime`, you can use the following criteria to stop the recovery: - -- `targetXID` specify a transaction ID up to which recovery will proceed - -- `targetName` specify a restore point (created with `pg_create_restore_point` - to which recovery will proceed) - -- `targetLSN` specify the LSN of the write-ahead log location up to which - recovery will proceed - -- `targetImmediate` specify to stop as soon as a consistent state is - reached - -You can choose only a single one among the targets above in each -`recoveryTarget` configuration. - -Additionally, you can specify `targetTLI` force recovery to a specific -timeline. - -By default, the previous parameters are considered to be exclusive, stopping -just before the recovery target. You can request inclusive behavior, -stopping right after the recovery target, setting the `exclusive` parameter to -`false` like in the following example: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-restore-pitr -spec: - instances: 3 - - storage: - size: 5Gi - - bootstrap: - recovery: - backup: - name: backup-example - - recoveryTarget: - targetName: "maintenance-activity" - exclusive: false -``` +# Bootstrap + +This section describes the options you have to create a new +PostgreSQL cluster and the design rationale behind them. + +When a PostgreSQL cluster is defined, you can configure the +*bootstrap* method using the `bootstrap` section of the cluster +specification. 
+ +In the following example: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-initdb +spec: + instances: 3 + + bootstrap: + initdb: + database: appdb + owner: appuser + + storage: + size: 1Gi +``` + +The `initdb` bootstrap method is used. + +We currently support the following bootstrap methods: + +- `initdb`: initialise an empty PostgreSQL cluster +- `recovery`: create a PostgreSQL cluster restoring from an existing backup + and replaying all the available WAL files. + +## initdb + +The `initdb` bootstrap method is used to create a new PostgreSQL cluster from +scratch. It is the default one unless specified differently. + +The following example contains the full structure of the `initdb` configuration: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-initdb +spec: + instances: 3 + + superuserSecret: + name: superuser-secret + + bootstrap: + initdb: + database: appdb + owner: appuser + secret: + name: appuser-secret + + storage: + size: 1Gi +``` + +The above example of bootstrap will: + +1. create a new `PGDATA` folder using PostgreSQL's native `initdb` command +2. set a *superuser* password from the secret named `superuser-secret` +3. create an *unprivileged* user named `appuser` +4. set the password of the latter using the one in the `appuser-secret` secret +5. create a database called `appdb` owned by the `appuser` user. + +Thanks to the *convention over configuration paradigm*, you can let the +operator choose a default database name (`app`) and a default application +user name (same as the database name), as well as randomly generate a +secure password for both the superuser and the application user in +PostgreSQL. + +Alternatively, you can generate your passwords, store them as secrets, +and use them in the PostgreSQL cluster - as described in the above example. 
+ +The supplied secrets must comply with the specifications of the +[`kubernetes.io/basic-auth` type](https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret). +The operator will only use the `password` field of the secret, +ignoring the `username` one. If you plan to reuse the secret for application +connections, you can set the `username` field to the same value as the `owner`. + +The following is an example of a `basic-auth` secret: + +```yaml +apiVersion: v1 +data: + password: cGFzc3dvcmQ= +kind: Secret +metadata: + name: cluster-example-app-user +type: kubernetes.io/basic-auth +``` + +The application database is the one that should be used to store application +data. Applications should connect to the cluster with the user that owns +the application database. + +!!! Important + Future implementations of the operator might allow you to create + additional users in a declarative configuration fashion. + +The superuser and the `postgres` database are supposed to be used only +by the operator to configure the cluster. + +In case you don't supply any database name, the operator will proceed +by convention and create the `app` database, and adds it to the cluster +definition using a *defaulting webhook*. +The user that owns the database defaults to the database name instead. + +The application user is not used internally by the operator, which instead +relies on the superuser to reconcile the cluster with the desired status. + +!!! Important + For now, changes to the name of the superuser secret are not applied + to the cluster. + +The actual PostgreSQL data directory is created via an invocation of the +`initdb` PostgreSQL command. 
If you need to add custom options to that +command (i.e., to change the locale used for the template databases or to +add data checksums), you can add them to the `options` section like in +the following example: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-initdb +spec: + instances: 3 + + bootstrap: + initdb: + database: appdb + owner: appuser + options: + - "-k" + - "--locale=en_US" + storage: + size: 1Gi +``` + +### Compatibility Features + +EDB Postgres Advanced adds many compatibility features to the +plain community PostgreSQL. You can find more information about that +in the [EDB Postgres Advanced](https://www.enterprisedb.com/edb-docs/p/edb-postgres-advanced-server). + +Those features are already enabled during cluster creation on EPAS and +are not supported on the community PostgreSQL image. To disable them +you can use the `redwood` flag in the `initdb` section +like in the following example: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-initdb +spec: + instances: 3 + imageName: + licenseKey: + + bootstrap: + initdb: + database: appdb + owner: appuser + redwood: false + storage: + size: 1Gi +``` + +!!! Important + EDB Postgres Advanced requires a valid license key (trial or production) to start. + +## recovery + +The `recovery` bootstrap mode lets you create a new cluster from +an existing backup. You can find more information about the recovery +feature in the ["Backup and recovery" page](backup_recovery.md). 
+ +The following example contains the full structure of the `recovery` +section: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-initdb +spec: + instances: 3 + + superuserSecret: + name: superuser-secret + + bootstrap: + recovery: + backup: + name: backup-example + + storage: + size: 1Gi +``` + +This bootstrap method allows you to specify just a reference to the +backup that needs to be restored. + +The application database name and the application database user are preserved +from the backup that is being restored. The operator does not currently attempt +to backup the underlying secrets, as this is part of the usual maintenance +activity of the Kubernetes cluster itself. + +In case you don't supply any `superuserSecret`, a new one is automatically +generated with a secure and random password. The secret is then used to +reset the password for the `postgres` user of the cluster. + +By default, the recovery will continue up to the latest +available WAL on the default target timeline (`current` for PostgreSQL up to +11, `latest` for version 12 and above). +You can optionally specify a `recoveryTarget` to perform a point in time +recovery (see the ["Point in time recovery" chapter](#point-in-time-recovery)). + +### Point in time recovery + +Instead of replaying all the WALs up to the latest one, +we can ask PostgreSQL to stop replaying WALs at any given point in time. +PostgreSQL uses this technique to implement *point-in-time* recovery. +This allows you to restore the database to its state at any time after +the base backup was taken. 
+
+The operator will generate the configuration parameters required for this
+feature to work if a recovery target is specified like in the following
+example:
+
+```yaml
+apiVersion: postgresql.k8s.enterprisedb.io/v1
+kind: Cluster
+metadata:
+  name: cluster-restore-pitr
+spec:
+  instances: 3
+
+  storage:
+    size: 5Gi
+
+  bootstrap:
+    recovery:
+      backup:
+        name: backup-example
+
+  recoveryTarget:
+    targetTime: "2020-11-26 15:22:00.00000+00"
+```
+
+Besides `targetTime`, you can use the following criteria to stop the recovery:
+
+- `targetXID` specifies a transaction ID up to which recovery will proceed
+
+- `targetName` specifies a restore point (created with `pg_create_restore_point`)
+  to which recovery will proceed
+
+- `targetLSN` specifies the LSN of the write-ahead log location up to which
+  recovery will proceed
+
+- `targetImmediate` specifies to stop as soon as a consistent state is
+  reached
+
+You can choose only a single one among the targets above in each
+`recoveryTarget` configuration.
+
+Additionally, you can specify `targetTLI` to force recovery to a specific
+timeline.
+
+By default, the previous parameters are considered to be exclusive, stopping
+just before the recovery target. 
You can request inclusive behavior, +stopping right after the recovery target, setting the `exclusive` parameter to +`false` like in the following example: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-restore-pitr +spec: + instances: 3 + + storage: + size: 5Gi + + bootstrap: + recovery: + backup: + name: backup-example + + recoveryTarget: + targetName: "maintenance-activity" + exclusive: false +``` diff --git a/temp_kubernetes/original/src/cloud_setup.md b/temp_kubernetes/original/src/cloud_setup.md old mode 100755 new mode 100644 index 13eae60aa29..67e3021c869 --- a/temp_kubernetes/original/src/cloud_setup.md +++ b/temp_kubernetes/original/src/cloud_setup.md @@ -1,110 +1,110 @@ -# Cloud Setup - -This section describes how to orchestrate the deployment and management -of a PostgreSQL High Availability cluster in a [Kubernetes](https://www.kubernetes.io/) cluster in the public cloud using -[CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -such as `Cluster`. Like any other Kubernetes application, it is deployed -using regular manifests written in YAML. - -The Cloud Native PostgreSQL Operator is systematically tested on the following public cloud environments: - -- [Microsoft Azure Kubernetes Service (AKS)](https://azure.microsoft.com/en-in/services/kubernetes-service/) -- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/) -- [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/) - -Below you can find specific instructions for each of the above environments. -Once the steps described on this page have been completed, and your `kubectl` -can connect to the desired cluster, you can install the operator and start -creating PostgreSQL `Clusters` by following the instructions you find in the -["Installation"](installation.md) section. - -!!! Important - `kubectl` is required to proceed with setup. 
- -## Microsoft Azure Kubernetes Service (AKS) - -Follow the instructions contained in -["Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using the Azure portal"](https://docs.microsoft.com/bs-latn-ba/azure/aks/kubernetes-walkthrough-portal) -available on the Microsoft documentation to set up your Kubernetes cluster in AKS. - -In particular, you need to configure `kubectl` to connect to your Kubernetes cluster -(called `myAKSCluster` using resources in `myResourceGroup` group) through the -`az aks get-credentials` command. -This command downloads the credentials and configures your `kubectl` to use them: - -```sh -az aks get-credentials --resource-group myResourceGroup --name myAKSCluster -``` - -!!! Note - You can change the name of the `myAKSCluster` cluster and the resource group `myResourceGroup` - from the Azure portal. - -You can use any of the storage classes that work with Azure disks: - -- `default` -- `managed-premium` - -!!! Seealso "About AKS storage classes" - For more information and details on the available storage classes in AKS, please refer to the - ["Storage classes" section in the official documentation from Microsoft](https://docs.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes). - -## Amazon Elastic Kubernetes Service (EKS) - -Follow the instructions contained in -["Creating an Amazon EKS Cluster"](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html) -available on the AWS documentation to set up your Kubernetes cluster in EKS. - -!!! Important - Keep in mind that Amazon puts limitations on how many pods a node can create. - It depends on the type of instance that you choose to use when you create - your cluster. - -After the setup, `kubectl` should point to your newly created EKS cluster. - -By default, a `gp2` storage class is available after cluster creation. 
However, Amazon EKS offers multiple -storage types that can be leveraged to create other storage classes for `Clusters`' volumes: - -- `gp2`: general-purpose SSD volume -- `io1`: provisioned IOPS SSD -- `st1`: throughput optimized HDD -- `sc1`: cold HDD - -!!! Seealso "About EKS storage classes" - For more information and details on the available storage classes in EKS, please refer to the - ["Amazon EBS Volume Types" page](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - in the official documentation for AWS and the - ["AWS-EBS" page](https://kubernetes.io/docs/concepts/storage/storage-classes/#aws-ebs) - in the Kubernetes documentation. - -## Google Kubernetes Engine (GKE) - -Follow the instructions contained in -["Creating a cluster"](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster) -available on the Google Cloud documentation to set up your Kubernetes cluster in GKE. - -!!! Warning - Google Kubernetes Engine uses the deprecated `kube-dns` server instead of the - recommended [CoreDNS](https://coredns.io/). To work with Cloud Native PostgreSQL Operator, - you need to disable `kube-dns` and replace it with `coredns`. - -To replace `kube-dns` with `coredns` in your GKE cluster, follow these instructions: - -```sh -kubectl scale --replicas=0 deployment/kube-dns-autoscaler --namespace=kube-system -kubectl scale --replicas=0 deployment/kube-dns --namespace=kube-system -git clone https://github.com/coredns/deployment.git -./deployment/kubernetes/deploy.sh | kubectl apply -f - -``` - -By default, a `standard` storage class is available after cluster creation, using -standard hard disks. For other storage types, you'll need to create specific -storage classes. - -!!! 
Seealso "About GKE storage classes" - For more information and details on the available storage types in GKE, please refer to the - ["GCE PD" section](https://kubernetes.io/docs/concepts/storage/storage-classes/#gce-pd) - of the Kubernetes documentation and the - ["Persistent volumes with Persistent Disks" page](https://cloud.google.com/kubernetes-engine/docs/concepts/persistent-volumes) - and related ones in the official documentation for Google Cloud. - +# Cloud Setup + +This section describes how to orchestrate the deployment and management +of a PostgreSQL High Availability cluster in a [Kubernetes](https://www.kubernetes.io/) cluster in the public cloud using +[CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +such as `Cluster`. Like any other Kubernetes application, it is deployed +using regular manifests written in YAML. + +The Cloud Native PostgreSQL Operator is systematically tested on the following public cloud environments: + +- [Microsoft Azure Kubernetes Service (AKS)](https://azure.microsoft.com/en-in/services/kubernetes-service/) +- [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/) +- [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/) + +Below you can find specific instructions for each of the above environments. +Once the steps described on this page have been completed, and your `kubectl` +can connect to the desired cluster, you can install the operator and start +creating PostgreSQL `Clusters` by following the instructions you find in the +["Installation"](installation.md) section. + +!!! Important + `kubectl` is required to proceed with setup. 
+ +## Microsoft Azure Kubernetes Service (AKS) + +Follow the instructions contained in +["Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using the Azure portal"](https://docs.microsoft.com/bs-latn-ba/azure/aks/kubernetes-walkthrough-portal) +available on the Microsoft documentation to set up your Kubernetes cluster in AKS. + +In particular, you need to configure `kubectl` to connect to your Kubernetes cluster +(called `myAKSCluster` using resources in `myResourceGroup` group) through the +`az aks get-credentials` command. +This command downloads the credentials and configures your `kubectl` to use them: + +```sh +az aks get-credentials --resource-group myResourceGroup --name myAKSCluster +``` + +!!! Note + You can change the name of the `myAKSCluster` cluster and the resource group `myResourceGroup` + from the Azure portal. + +You can use any of the storage classes that work with Azure disks: + +- `default` +- `managed-premium` + +!!! Seealso "About AKS storage classes" + For more information and details on the available storage classes in AKS, please refer to the + ["Storage classes" section in the official documentation from Microsoft](https://docs.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes). + +## Amazon Elastic Kubernetes Service (EKS) + +Follow the instructions contained in +["Creating an Amazon EKS Cluster"](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html) +available on the AWS documentation to set up your Kubernetes cluster in EKS. + +!!! Important + Keep in mind that Amazon puts limitations on how many pods a node can create. + It depends on the type of instance that you choose to use when you create + your cluster. + +After the setup, `kubectl` should point to your newly created EKS cluster. + +By default, a `gp2` storage class is available after cluster creation. 
However, Amazon EKS offers multiple +storage types that can be leveraged to create other storage classes for `Clusters`' volumes: + +- `gp2`: general-purpose SSD volume +- `io1`: provisioned IOPS SSD +- `st1`: throughput optimized HDD +- `sc1`: cold HDD + +!!! Seealso "About EKS storage classes" + For more information and details on the available storage classes in EKS, please refer to the + ["Amazon EBS Volume Types" page](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) + in the official documentation for AWS and the + ["AWS-EBS" page](https://kubernetes.io/docs/concepts/storage/storage-classes/#aws-ebs) + in the Kubernetes documentation. + +## Google Kubernetes Engine (GKE) + +Follow the instructions contained in +["Creating a cluster"](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster) +available on the Google Cloud documentation to set up your Kubernetes cluster in GKE. + +!!! Warning + Google Kubernetes Engine uses the deprecated `kube-dns` server instead of the + recommended [CoreDNS](https://coredns.io/). To work with Cloud Native PostgreSQL Operator, + you need to disable `kube-dns` and replace it with `coredns`. + +To replace `kube-dns` with `coredns` in your GKE cluster, follow these instructions: + +```sh +kubectl scale --replicas=0 deployment/kube-dns-autoscaler --namespace=kube-system +kubectl scale --replicas=0 deployment/kube-dns --namespace=kube-system +git clone https://github.com/coredns/deployment.git +./deployment/kubernetes/deploy.sh | kubectl apply -f - +``` + +By default, a `standard` storage class is available after cluster creation, using +standard hard disks. For other storage types, you'll need to create specific +storage classes. + +!!! 
Seealso "About GKE storage classes" + For more information and details on the available storage types in GKE, please refer to the + ["GCE PD" section](https://kubernetes.io/docs/concepts/storage/storage-classes/#gce-pd) + of the Kubernetes documentation and the + ["Persistent volumes with Persistent Disks" page](https://cloud.google.com/kubernetes-engine/docs/concepts/persistent-volumes) + and related ones in the official documentation for Google Cloud. + diff --git a/temp_kubernetes/original/src/cnp-plugin.md b/temp_kubernetes/original/src/cnp-plugin.md new file mode 100644 index 00000000000..46ca9931427 --- /dev/null +++ b/temp_kubernetes/original/src/cnp-plugin.md @@ -0,0 +1,140 @@ +# Cloud Native PostgreSQL Plugin + +Cloud Native PostgreSQL provides a plugin for `kubectl` to manage a cluster in Kubernetes. +The plugin also works with `oc` in an OpenShift environment. + +## Install + +You can install the plugin in your system with: + +```sh +curl -sSfL \ + https://github.com/EnterpriseDB/kubectl-cnp/raw/main/install.sh | \ + sudo sh -s -- -b /usr/local/bin +``` + +## Use + +Once the plugin is installed and deployed, you can start using it like this: + +```shell +kubectl cnp +``` + +### Status + +The `status` command provides a brief overview of the current status of your cluster. 
+ +```shell +kubectl cnp status cluster-example +``` + +```shell +Cluster in healthy state +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13 +Primary instance: cluster-example-1 +Instances: 3 +Ready instances: 3 + +Instances status +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- +cluster-example-1 0/6000060 6927251808674721812 ✓ ✗ ✗ ✗ +cluster-example-2 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ +cluster-example-3 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ + +``` + +You can also get a more verbose version of the status by adding `--verbose` or just `-v` + +```shell +kubectl cnp status cluster-example --verbose +``` + +```shell +Cluster in healthy state +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13 +Primary instance: cluster-example-1 +Instances: 3 +Ready instances: 3 + +PostgreSQL Configuration +archive_command = '/controller/manager wal-archive %p' +archive_mode = 'on' +archive_timeout = '5min' +full_page_writes = 'on' +hot_standby = 'true' +listen_addresses = '*' +logging_collector = 'off' +max_parallel_workers = '32' +max_replication_slots = '32' +max_worker_processes = '32' +port = '5432' +ssl = 'on' +ssl_ca_file = '/tmp/ca.crt' +ssl_cert_file = '/tmp/server.crt' +ssl_key_file = '/tmp/server.key' +unix_socket_directories = '/var/run/postgresql' +wal_keep_size = '512MB' +wal_level = 'logical' +wal_log_hints = 'on' + + +PostgreSQL HBA Rules +# Grant local access +local all all peer + +# Require client certificate authentication for the streaming_replica user +hostssl postgres streaming_replica all cert clientcert=1 +hostssl replication streaming_replica all cert clientcert=1 + +# Otherwise use md5 authentication +host all all all md5 + + +Instances status +Pod name Current LSN Received LSN Replay 
LSN System ID Primary Replicating Replay paused Pending restart +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- +cluster-example-1 0/6000060 6927251808674721812 ✓ ✗ ✗ ✗ +cluster-example-2 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ +cluster-example-3 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ +``` + +The command also supports output in `yaml` and `json` format. + +### Promote + +The meaning of this command is to `promote` a pod in the cluster to primary, so you +can start with maintenance work or test a switch-over situation in your cluster. + +```shell +kubectl cnp promote cluster-example cluster-example-2 +``` + +### Certificates + +Clusters created using the Cloud Native PostgreSQL operator work with a CA to sign +a TLS authentication certificate. + +To get a certificate, you need to provide a name for the secret to store +the credentials, the cluster name, and a user for this certificate. + +```shell +kubectl cnp certificate cluster-cert --cnp-cluster cluster-example --cnp-user appuser +``` + +After the secret is created, you can get it using `kubectl` + +```shell +kubectl get secret cluster-cert +``` + +You can view its content in plain text using the following command: + +```shell +kubectl get secret cluster-cert -o json | jq -r '.data | map(@base64d) | .[]' +``` diff --git a/temp_kubernetes/original/src/container_images.md b/temp_kubernetes/original/src/container_images.md old mode 100755 new mode 100644 index 5b066ccc260..524b69e5a1d --- a/temp_kubernetes/original/src/container_images.md +++ b/temp_kubernetes/original/src/container_images.md @@ -1,53 +1,53 @@ -# Container Image Requirements - -The Cloud Native PostgreSQL operator for Kubernetes is designed to -work with any compatible container image of PostgreSQL that complies -with the following requirements: - -- PostgreSQL 10+ executables that must be in the path: - - `initdb` - - `postgres` - - `pg_ctl` - - `pg_controldata` - - 
`pg_basebackup` -- Barman Cloud executables that must be in the path: - - `barman-cloud-wal-archive` - - `barman-cloud-wal-restore` - - `barman-cloud-backup` - - `barman-cloud-restore` - - `barman-cloud-backup-list` -- Sensible locale settings - -No entry point and/or command is required in the image definition, as Cloud -Native PostgreSQL overrides it with its instance manager. - -!!! Warning - Application Container Images will be used by Cloud Native PostgreSQL - in a **Primary with multiple/optional Hot Standby Servers Architecture** - only. - -EnterpriseDB provides and supports public container images for Cloud Native -PostgreSQL and publishes them on [Quay.io](https://quay.io/repository/enterprisedb/postgresql). - -## Image tag requirements - -While the image name can be anything valid for Docker, the Cloud Native -PostgreSQL operator relies on the *image tag* to detect the Postgres major -version carried out by the image. - -The image tag must start with a valid PostgreSQL major version number (e.g. 9.6 -or 12) optionally followed by a dot and the patch level. - -The prefix can be followed by any valid character combination that is valid and -accepted in a Docker tag, preceded by a dot, an underscore, or a minus sign. - -Examples of accepted image tags: - -- `9.6.19-alpine` -- `12.4` -- `11_1` -- `13` -- `12.3.2.1-1` - -!!! Warning - `latest` is not considered a valid tag for the image. 
+# Container Image Requirements + +The Cloud Native PostgreSQL operator for Kubernetes is designed to +work with any compatible container image of PostgreSQL that complies +with the following requirements: + +- PostgreSQL 10+ executables that must be in the path: + - `initdb` + - `postgres` + - `pg_ctl` + - `pg_controldata` + - `pg_basebackup` +- Barman Cloud executables that must be in the path: + - `barman-cloud-wal-archive` + - `barman-cloud-wal-restore` + - `barman-cloud-backup` + - `barman-cloud-restore` + - `barman-cloud-backup-list` +- Sensible locale settings + +No entry point and/or command is required in the image definition, as Cloud +Native PostgreSQL overrides it with its instance manager. + +!!! Warning + Application Container Images will be used by Cloud Native PostgreSQL + in a **Primary with multiple/optional Hot Standby Servers Architecture** + only. + +EnterpriseDB provides and supports public container images for Cloud Native +PostgreSQL and publishes them on [Quay.io](https://quay.io/repository/enterprisedb/postgresql). + +## Image tag requirements + +While the image name can be anything valid for Docker, the Cloud Native +PostgreSQL operator relies on the *image tag* to detect the Postgres major +version carried out by the image. + +The image tag must start with a valid PostgreSQL major version number (e.g. 9.6 +or 12) optionally followed by a dot and the patch level. + +The prefix can be followed by any valid character combination that is valid and +accepted in a Docker tag, preceded by a dot, an underscore, or a minus sign. + +Examples of accepted image tags: + +- `9.6.19-alpine` +- `12.4` +- `11_1` +- `13` +- `12.3.2.1-1` + +!!! Warning + `latest` is not considered a valid tag for the image. 
diff --git a/temp_kubernetes/original/src/credits.md b/temp_kubernetes/original/src/credits.md old mode 100755 new mode 100644 index a66ff5225d4..74c0d92c67e --- a/temp_kubernetes/original/src/credits.md +++ b/temp_kubernetes/original/src/credits.md @@ -1,15 +1,17 @@ -# Credits - -Cloud Native PostgreSQL (Operator for Kubernetes/OpenShift) has been designed, -developed, and tested by the EnterpriseDB Cloud Native team: - -- Leonardo Cecchi -- Marco Nenciarini -- Jonathan Gonzalez -- Francesco Canovai -- Jonathan Battiato -- Niccolò Fei -- Devin Nemec -- Adam Wright -- Gabriele Bartolini - +# Credits + +Cloud Native PostgreSQL (Operator for Kubernetes/OpenShift) has been designed, +developed, and tested by the EnterpriseDB Cloud Native team: + +- Gabriele Bartolini +- Jonathan Battiato +- Francesco Canovai +- Leonardo Cecchi +- Valerio Del Sarto +- Niccolò Fei +- Jonathan Gonzalez +- Danish Khan +- Marco Nenciarini +- Jitendra Wadle +- Adam Wright + diff --git a/temp_kubernetes/original/src/e2e.md b/temp_kubernetes/original/src/e2e.md old mode 100755 new mode 100644 index 9a4baecea27..4627c9b5166 --- a/temp_kubernetes/original/src/e2e.md +++ b/temp_kubernetes/original/src/e2e.md @@ -1,52 +1,52 @@ -# End-to-End Tests - -Cloud Native PostgreSQL operator is automatically tested after each -commit via a suite of **End-to-end (E2E) tests**. It ensures that -the operator correctly deploys and manages the PostgreSQL clusters. 
- -Moreover, the following Kubernetes versions are tested for each commit, -ensuring failure and bugs detection at an early stage of the development -process: - -* 1.20 -* 1.19 -* 1.18 -* 1.17 -* 1.16 - -The following PostgreSQL versions are tested: - -* PostgreSQL 13 -* PostgreSQL 12 -* PostgreSQL 11 -* PostgreSQL 10 - -For each tested version of Kubernetes and PostgreSQL, a Kubernetes -cluster is created using [kind](https://kind.sigs.k8s.io/), -and the following suite of E2E tests are performed on that cluster: - -* Installation of the operator; -* Creation of a `Cluster`; -* Usage of a persistent volume for data storage; -* Connection via services; -* Scale-up of a `Cluster`; -* Scale-down of a `Cluster`; -* Failover; -* Switchover; -* Manage PostgreSQL configuration changes; -* Rolling updates when changing PostgreSQL images; -* Backup and ScheduledBackups execution; -* Synchronous replication; -* Restore from backup; -* Pod affinity using `NodeSelector`; -* Metrics collection; -* Primary endpoint switch in case of failover in less than 10 seconds; -* Primary endpoint switch in case of switchover in less than 20 seconds; -* Recover from a degraded state in less than 60 seconds. - -The E2E tests suite is also run for OpenShift 4.6 and the latest Kubernetes -and PostgreSQL releases on clusters created on the following services: - -* Google GKE -* Amazon EKS -* Microsoft Azure AKS +# End-to-End Tests + +Cloud Native PostgreSQL operator is automatically tested after each +commit via a suite of **End-to-end (E2E) tests**. It ensures that +the operator correctly deploys and manages the PostgreSQL clusters. 
+ +Moreover, the following Kubernetes versions are tested for each commit, +ensuring failure and bugs detection at an early stage of the development +process: + +* 1.20 +* 1.19 +* 1.18 +* 1.17 +* 1.16 + +The following PostgreSQL versions are tested: + +* PostgreSQL 13 +* PostgreSQL 12 +* PostgreSQL 11 +* PostgreSQL 10 + +For each tested version of Kubernetes and PostgreSQL, a Kubernetes +cluster is created using [kind](https://kind.sigs.k8s.io/), +and the following suite of E2E tests are performed on that cluster: + +* Installation of the operator; +* Creation of a `Cluster`; +* Usage of a persistent volume for data storage; +* Connection via services, including read-only; +* Scale-up of a `Cluster`; +* Scale-down of a `Cluster`; +* Failover; +* Switchover; +* Manage PostgreSQL configuration changes; +* Rolling updates when changing PostgreSQL images; +* Backup and ScheduledBackups execution; +* Synchronous replication; +* Restore from backup; +* Pod affinity using `NodeSelector`; +* Metrics collection; +* Primary endpoint switch in case of failover in less than 10 seconds; +* Primary endpoint switch in case of switchover in less than 20 seconds; +* Recover from a degraded state in less than 60 seconds. 
+ +The E2E tests suite is also run for OpenShift 4.6 and the latest Kubernetes +and PostgreSQL releases on clusters created on the following services: + +* Google GKE +* Amazon EKS +* Microsoft Azure AKS diff --git a/temp_kubernetes/original/src/evaluation.md b/temp_kubernetes/original/src/evaluation.md old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/expose_pg_services.md b/temp_kubernetes/original/src/expose_pg_services.md old mode 100755 new mode 100644 index 7b7778cffdf..73ec47ece31 --- a/temp_kubernetes/original/src/expose_pg_services.md +++ b/temp_kubernetes/original/src/expose_pg_services.md @@ -1,143 +1,143 @@ -# Exposing Postgres Services - -This section explains how to expose a PostgreSQL service externally, allowing access -to your PostgreSQL database **from outside your Kubernetes cluster** using -NGINX Ingress Controller. - -If you followed the [QuickStart](/quickstart), you should have by now -a database that can be accessed inside the cluster via the -`cluster-example-rw` (primary) and `cluster-example-r` (read-only) -services in the `default` namespace. Both services use port `5432`. - -Let's assume that you want to make the primary instance accessible from external -accesses on port `5432`. A typical use case, when moving to a Kubernetes -infrastructure, is indeed the one represented by **legacy applications** -that cannot be easily or sustainably "containerized". A sensible workaround -is to allow those applications that most likely reside in a virtual machine -or a physical server, to access a PostgreSQL database inside a Kubernetes cluster -in the same network. - -!!! Warning - Allowing access to a database from the public network could expose - your database to potential attacks from malicious users. Ensure you - secure your database before granting external access or that your - Kubernetes cluster is only reachable from a private network. 
- -For this example, you will use [NGINX Ingress Controller](https://kubernetes.github.io/ingress-nginx/), -since it is maintained directly by the Kubernetes project and can be set up -on every Kubernetes cluster. Many other controllers are available (see the -[Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) -for a comprehensive list). - -We assume that: - -* the NGINX Ingress controller has been deployed and works correctly -* it is possible to create a service of type `LoadBalancer` in your cluster - - -!!! Important - Ingresses are only required to expose HTTP and HTTPS traffic. While the NGINX - Ingress controller can, not all Ingress objects can expose arbitrary ports or - protocols. - -The first step is to create a `tcp-services` `ConfigMap` whose data field -contains info on the externally exposed port and the namespace, service and -port to point to internally. - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: tcp-services - namespace: ingress-nginx -data: - 5432: default/cluster-example-rw:5432 -``` - -Then, if you've installed NGINX Ingress Controller as suggested in their -documentation, you should have an `ingress-nginx` service. You'll have to add -the 5432 port to the `ingress-nginx` service to expose it. -The ingress will redirect incoming connections on port 5432 to your database. 
- -```yaml -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx - namespace: ingress-nginx - labels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx -spec: - type: LoadBalancer - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - protocol: TCP - - name: postgres - port: 5432 - targetPort: 5432 - protocol: TCP - selector: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx -``` - -You can use [`cluster-expose-service.yaml`](samples/cluster-expose-service.yaml) and apply it -using `kubectl`. - -!!! Warning - If you apply this file directly, you will overwrite any previous change - in your `ConfigMap` and `Service` of the Ingress - -Now you will be able to reach the PostgreSQL Cluster from outside your Kubernetes cluster. - -!!! Important - Make sure you configure `pg_hba` to allow connections from the Ingress. - -## Testing on Minikube - -On Minikube you can setup the ingress controller running: - -```sh -minikube addons enable ingress -``` - -Then, patch the `tcp-service` ConfigMap to redirect to the primary the -connections on port 5432 of the Ingress: - -```sh -kubectl patch configmap tcp-services -n kube-system \ - --patch '{"data":{"5432":"default/cluster-example-rw:5432"}}' -``` - -You can then patch the deployment to allow access on port 5432. 
-Create a file called `patch.yaml` with the following content: - -```yaml -spec: - template: - spec: - containers: - - name: nginx-ingress-controller - ports: - - containerPort: 5432 - hostPort: 5432 -``` - -and apply it to the `nginx-ingress-controller deployment`: - -```sh -kubectl patch deployment nginx-ingress-controller --patch "$(cat patch.yaml)" -n kube-system -``` - -You can access the primary from your machine running: - -```sh -psql -h $(minikube ip) -p 5432 -U postgres -``` +# Exposing Postgres Services + +This section explains how to expose a PostgreSQL service externally, allowing access +to your PostgreSQL database **from outside your Kubernetes cluster** using +NGINX Ingress Controller. + +If you followed the [QuickStart](./quickstart.md), you should have by now +a database that can be accessed inside the cluster via the +`cluster-example-rw` (primary) and `cluster-example-r` (read-only) +services in the `default` namespace. Both services use port `5432`. + +Let's assume that you want to make the primary instance accessible from external +accesses on port `5432`. A typical use case, when moving to a Kubernetes +infrastructure, is indeed the one represented by **legacy applications** +that cannot be easily or sustainably "containerized". A sensible workaround +is to allow those applications that most likely reside in a virtual machine +or a physical server, to access a PostgreSQL database inside a Kubernetes cluster +in the same network. + +!!! Warning + Allowing access to a database from the public network could expose + your database to potential attacks from malicious users. Ensure you + secure your database before granting external access or that your + Kubernetes cluster is only reachable from a private network. + +For this example, you will use [NGINX Ingress Controller](https://kubernetes.github.io/ingress-nginx/), +since it is maintained directly by the Kubernetes project and can be set up +on every Kubernetes cluster. 
Many other controllers are available (see the +[Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) +for a comprehensive list). + +We assume that: + +* the NGINX Ingress controller has been deployed and works correctly +* it is possible to create a service of type `LoadBalancer` in your cluster + + +!!! Important + Ingresses are only required to expose HTTP and HTTPS traffic. While the NGINX + Ingress controller can, not all Ingress objects can expose arbitrary ports or + protocols. + +The first step is to create a `tcp-services` `ConfigMap` whose data field +contains info on the externally exposed port and the namespace, service and +port to point to internally. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: tcp-services + namespace: ingress-nginx +data: + 5432: default/cluster-example-rw:5432 +``` + +Then, if you've installed NGINX Ingress Controller as suggested in their +documentation, you should have an `ingress-nginx` service. You'll have to add +the 5432 port to the `ingress-nginx` service to expose it. +The ingress will redirect incoming connections on port 5432 to your database. + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + type: LoadBalancer + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP + - name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +``` + +You can use [`cluster-expose-service.yaml`](samples/cluster-expose-service.yaml) and apply it +using `kubectl`. + +!!! 
Warning + If you apply this file directly, you will overwrite any previous change + in your `ConfigMap` and `Service` of the Ingress + +Now you will be able to reach the PostgreSQL Cluster from outside your Kubernetes cluster. + +!!! Important + Make sure you configure `pg_hba` to allow connections from the Ingress. + +## Testing on Minikube + +On Minikube you can setup the ingress controller running: + +```sh +minikube addons enable ingress +``` + +Then, patch the `tcp-service` ConfigMap to redirect to the primary the +connections on port 5432 of the Ingress: + +```sh +kubectl patch configmap tcp-services -n kube-system \ + --patch '{"data":{"5432":"default/cluster-example-rw:5432"}}' +``` + +You can then patch the deployment to allow access on port 5432. +Create a file called `patch.yaml` with the following content: + +```yaml +spec: + template: + spec: + containers: + - name: nginx-ingress-controller + ports: + - containerPort: 5432 + hostPort: 5432 +``` + +and apply it to the `nginx-ingress-controller deployment`: + +```sh +kubectl patch deployment nginx-ingress-controller --patch "$(cat patch.yaml)" -n kube-system +``` + +You can access the primary from your machine running: + +```sh +psql -h $(minikube ip) -p 5432 -U postgres +``` diff --git a/temp_kubernetes/original/src/failure_modes.md b/temp_kubernetes/original/src/failure_modes.md old mode 100755 new mode 100644 index 8bfa7dc273b..a8f2f4e8a6f --- a/temp_kubernetes/original/src/failure_modes.md +++ b/temp_kubernetes/original/src/failure_modes.md @@ -1,156 +1,152 @@ -# Failure Modes - -This section provides an overview of the major failure scenarios that -PostgreSQL can face on a Kubernetes cluster during its lifetime. - -!!! Important - In case the failure scenario you are experiencing is not covered by this - section, please immediately contact EnterpriseDB for support and assistance. 
- -## Liveness and readiness probes - -Each pod of a `Cluster` has a `postgres` container with a **liveness** -and a **readiness** -[probe](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). - -The liveness and readiness probes check if the database is up and able to accept -connections using the superuser credentials. -The two probes will report a failure if the probe command fails 3 times with a -10 seconds interval between each check. - -For now, the operator doesn't configure a `startupProbe` on the Pods, since -startup probes have been introduced only in Kubernetes 1.17. - -The liveness probe is used to detect if the PostgreSQL instance is in a -broken state and needs to be restarted. The value in `startDelay` is used -to delay the probe's execution, which is used to prevent an -instance with a long startup time from being restarted. - -## Storage space usage - -The operator will instantiate one PVC for every PostgreSQL instance to store the `PGDATA` content. - -Such storage space is set for reuse in two cases: - -- when the corresponding Pod is deleted by the user (and a new Pod will be recreated) -- when the corresponding Pod is evicted and scheduled on another node - -If you want to prevent the operator from reusing a certain PVC you need to -remove the PVC before deleting the Pod. 
For this purpose, you can use the -following command: - -```sh -kubectl delete -n [namespace] pvc/[cluster-name]-[serial] --wait=false -kubectl delete -n [namespace] pod/[cluster-name]-[serial] -``` - -For example: - -```sh -$ kubectl delete -n default pvc/cluster-example-1 --wait=false -persistentvolumeclaim "cluster-example-1" deleted - -$ kubectl delete -n default pod/cluster-example-1 -pod "cluster-example-1" deleted -``` - -## Failure modes - -A pod belonging to a `Cluster` can fail in the following ways: - -* the pod is explicitly deleted by the user; -* the readiness probe on its `postgres` container fails; -* the liveness probe on its `postgres` container fails; -* the Kubernetes worker node is drained; -* the Kubernetes worker node where the pod is scheduled fails. - -Each one of these failures has different effects on the `Cluster` and the -services managed by the operator. - -### Pod deleted by the user - -The operator is notified of the deletion. A new pod belonging to the -`Cluster` will be automatically created reusing the existing PVC, if available, -or starting from a physical backup of the *primary* otherwise. - -!!! Important - In case of deliberate deletion of a pod, `PodDisruptionBudget` policies - will not be enforced. - -Self-healing will happen as soon as the *apiserver* is notified. - -### Readiness probe failure - -After 3 failures, the pod will be considered *not ready*. The pod will still -be part of the `Cluster`, no new pod will be created. - -If the cause of the failure can't be fixed, it is possible to delete the pod -manually. Otherwise, the pod will resume the previous role when the failure -is solved. - -Self-healing will happen after three failures of the probe. - -### Liveness probe failure - -After 3 failures, the `postgres` container will be considered failed. The -pod will still be part of the `Cluster`, and the *kubelet* will try to restart -the container. 
If the cause of the failure can't be fixed, it is possible -to delete the pod manually. - -Self-healing will happen after three failures of the probe. - -### Worker node drained - -The pod will be evicted from the worker node and removed from the service. A -new pod will be created on a different worker node from a physical backup of the -*primary* if the `reusePVC` option of the `nodeMaintenanceWindow` parameter -is set to `off` (default: `on` during maintenance windows, `off` otherwise). - -The `PodDisruptionBudget` may prevent the pod from being evicted if there -is at least one node that is not ready. - -Self-healing will happen as soon as the *apiserver* is notified. - -### Worker node failure - -Since the node is failed, the *kubelet* won't execute the liveness and -the readiness probes. The pod will be marked for deletion after the -toleration seconds configured by the Kubernetes cluster administrator for -that specific failure cause. Based on how the Kubernetes cluster is configured, -the pod might be removed from the service earlier. - -A new pod will be created on a different worker node from a physical backup -of the *primary*. The default value for that parameter in a Kubernetes -cluster is 5 minutes. - -Self-healing will happen after `tolerationSeconds`. - -## Self-healing - -If the failed pod is a standby, the pod is removed from the `-r` service. -The pod is then restarted using its PVC if available; otherwise, a new -pod will be created from a backup of the current primary. The pod -will be added again to the `-r` service when ready. - -If the failed pod is the primary, the operator will promote the active pod -with status ready and the lowest replication lag, then point the `-rw`service -to it. The failed pod will be removed from the `-r` service. -Other standbys will start replicating from the new primary. 
The former -primary will use `pg_rewind` to synchronize itself with the new one if its -PVC is available; otherwise, a new standby will be created from a backup of the -current primary. - -!!! Important - Due to a [bug in PostgreSQL 13 streaming replication](https://www.postgresql.org/message-id/flat/20201209.174314.282492377848029776.horikyota.ntt%40gmail.com) - it is not guaranteed that an existing standby is able to follow a promoted - primary, even if the new primary contains all the required WALs. Standbys - will be able to follow a primary if WAL archiving is configured. - -## Manual intervention - -In the case of undocumented failure, it might be necessary to intervene -to solve the problem manually. - -!!! Important - In such cases, please do not perform any manual operation without the - support and assistance of EnterpriseDB engineering team. +# Failure Modes + +This section provides an overview of the major failure scenarios that +PostgreSQL can face on a Kubernetes cluster during its lifetime. + +!!! Important + In case the failure scenario you are experiencing is not covered by this + section, please immediately contact EnterpriseDB for support and assistance. + +## Liveness and readiness probes + +Each pod of a `Cluster` has a `postgres` container with a **liveness** +and a **readiness** +[probe](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). + +The liveness and readiness probes check if the database is up and able to accept +connections using the superuser credentials. +The two probes will report a failure if the probe command fails 3 times with a +10 seconds interval between each check. + +For now, the operator doesn't configure a `startupProbe` on the Pods, since +startup probes have been introduced only in Kubernetes 1.17. + +The liveness probe is used to detect if the PostgreSQL instance is in a +broken state and needs to be restarted. 
The value in `startDelay` is used +to delay the probe's execution, which is used to prevent an +instance with a long startup time from being restarted. + +## Storage space usage + +The operator will instantiate one PVC for every PostgreSQL instance to store the `PGDATA` content. + +Such storage space is set for reuse in two cases: + +- when the corresponding Pod is deleted by the user (and a new Pod will be recreated) +- when the corresponding Pod is evicted and scheduled on another node + +If you want to prevent the operator from reusing a certain PVC you need to +remove the PVC before deleting the Pod. For this purpose, you can use the +following command: + +```sh +kubectl delete -n [namespace] pvc/[cluster-name]-[serial] --wait=false +kubectl delete -n [namespace] pod/[cluster-name]-[serial] +``` + +For example: + +```sh +$ kubectl delete -n default pvc/cluster-example-1 --wait=false +persistentvolumeclaim "cluster-example-1" deleted + +$ kubectl delete -n default pod/cluster-example-1 +pod "cluster-example-1" deleted +``` + +## Failure modes + +A pod belonging to a `Cluster` can fail in the following ways: + +* the pod is explicitly deleted by the user; +* the readiness probe on its `postgres` container fails; +* the liveness probe on its `postgres` container fails; +* the Kubernetes worker node is drained; +* the Kubernetes worker node where the pod is scheduled fails. + +Each one of these failures has different effects on the `Cluster` and the +services managed by the operator. + +### Pod deleted by the user + +The operator is notified of the deletion. A new pod belonging to the +`Cluster` will be automatically created reusing the existing PVC, if available, +or starting from a physical backup of the *primary* otherwise. + +!!! Important + In case of deliberate deletion of a pod, `PodDisruptionBudget` policies + will not be enforced. + +Self-healing will happen as soon as the *apiserver* is notified. 
+
+### Readiness probe failure
+
+After 3 failures, the pod will be considered *not ready*. The pod will still
+be part of the `Cluster`; no new pod will be created.
+
+If the cause of the failure can't be fixed, it is possible to delete the pod
+manually. Otherwise, the pod will resume the previous role when the failure
+is solved.
+
+Self-healing will happen after three failures of the probe.
+
+### Liveness probe failure
+
+After 3 failures, the `postgres` container will be considered failed. The
+pod will still be part of the `Cluster`, and the *kubelet* will try to restart
+the container. If the cause of the failure can't be fixed, it is possible
+to delete the pod manually.
+
+Self-healing will happen after three failures of the probe.
+
+### Worker node drained
+
+The pod will be evicted from the worker node and removed from the service. A
+new pod will be created on a different worker node from a physical backup of the
+*primary* if the `reusePVC` option of the `nodeMaintenanceWindow` parameter
+is set to `off` (default: `on` during maintenance windows, `off` otherwise).
+
+The `PodDisruptionBudget` may prevent the pod from being evicted if there
+is at least one node that is not ready.
+
+Self-healing will happen as soon as the *apiserver* is notified.
+
+### Worker node failure
+
+Since the node is failed, the *kubelet* won't execute the liveness and
+the readiness probes. The pod will be marked for deletion after the
+toleration seconds configured by the Kubernetes cluster administrator for
+that specific failure cause. Based on how the Kubernetes cluster is configured,
+the pod might be removed from the service earlier.
+
+A new pod will be created on a different worker node from a physical backup
+of the *primary*. The default value for that parameter in a Kubernetes
+cluster is 5 minutes.
+
+Self-healing will happen after `tolerationSeconds`.
+
+## Self-healing
+
+If the failed pod is a standby, the pod is removed from the `-r` service
+and from the `-ro` service.
+The pod is then restarted using its PVC if available; otherwise, a new
+pod will be created from a backup of the current primary. The pod
+will be added again to the `-r` service and to the `-ro` service when ready.
+
+If the failed pod is the primary, the operator will promote the active pod
+with status ready and the lowest replication lag, then point the `-rw` service
+to it. The failed pod will be removed from the `-r` service and from the
+`-ro` service.
+Other standbys will start replicating from the new primary. The former
+primary will use `pg_rewind` to synchronize itself with the new one if its
+PVC is available; otherwise, a new standby will be created from a backup of the
+current primary.
+
+## Manual intervention
+
+In the case of undocumented failure, it might be necessary to intervene
+to solve the problem manually.
+
+!!! Important
+    In such cases, please do not perform any manual operation without the
+    support and assistance of EnterpriseDB engineering team.
diff --git a/temp_kubernetes/original/src/images/apps-in-k8s.png b/temp_kubernetes/original/src/images/apps-in-k8s.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/apps-outside-k8s.png b/temp_kubernetes/original/src/images/apps-outside-k8s.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/architecture-in-k8s.png b/temp_kubernetes/original/src/images/architecture-in-k8s.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/architecture-r.png b/temp_kubernetes/original/src/images/architecture-r.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/architecture-rw.png b/temp_kubernetes/original/src/images/architecture-rw.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/network-storage-architecture.png b/temp_kubernetes/original/src/images/network-storage-architecture.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/operator-capability-level.png b/temp_kubernetes/original/src/images/operator-capability-level.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/public-cloud-architecture-storage-replication.png b/temp_kubernetes/original/src/images/public-cloud-architecture-storage-replication.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/public-cloud-architecture.png b/temp_kubernetes/original/src/images/public-cloud-architecture.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/images/shared-nothing-architecture.png b/temp_kubernetes/original/src/images/shared-nothing-architecture.png old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/index.md b/temp_kubernetes/original/src/index.md old mode 100755 new mode 100644 index 617818cac7c..0ac8212eeb7 --- a/temp_kubernetes/original/src/index.md +++ b/temp_kubernetes/original/src/index.md @@ -1,79 +1,79 @@ 
-# Cloud Native PostgreSQL - -**Cloud Native PostgreSQL** is an [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) -designed by [EnterpriseDB](https://www.enterprisedb.com) -to manage [PostgreSQL](https://www.postgresql.org/) workloads on any supported [Kubernetes](https://kubernetes.io) -cluster running in private, public, or hybrid cloud environments. -Cloud Native PostgreSQL adheres to DevOps principles and concepts -such as declarative configuration and immutable infrastructure. - -It defines a new Kubernetes resource called "Cluster" representing a PostgreSQL -cluster made up of a single primary and an optional number of replicas that co-exist -in a chosen Kubernetes namespace for High Availability and offloading of -read-only queries. - -Applications that reside in the same Kubernetes cluster can access the -PostgreSQL database using a service which is solely managed by the operator, -without having to worry about changes of the primary role following a failover -or a switchover. Applications that reside outside the Kubernetes cluster, need -to configure an Ingress object to expose the service via TCP. - -Cloud Native PostgreSQL works with PostgreSQL and [EDB Postgres Advanced](https://www.enterprisedb.com/products/edb-postgres-advanced-server-secure-ha-oracle-compatible) -and is available under the [EnterpriseDB Limited Use License](https://www.enterprisedb.com/limited-use-license). - -You can [evaluate Cloud Native PostgreSQL for free](evaluation.md). -You need a valid license key to use Cloud Native PostgreSQL in production. - -!!! IMPORTANT - Currently, based on the [Operator Capability Levels model](operator_capability_levels.md), - users can expect a **"Level III - Full Lifecycle"** set of capabilities from the - Cloud Native PostgreSQL Operator. - -## Requirements - -Cloud Native PostgreSQL requires Kubernetes 1.16 or higher, tested on AWS, Google, Azure (with multiple availability zones). 
- -Cloud Native PostgreSQL has also been certified for -[RedHat OpenShift Container Platform (OCP)](https://www.openshift.com/products/container-platform) -4.5+ and is available directly from the [RedHat Catalog](https://catalog.redhat.com/). -OpenShift Container Platform is an open-source distribution of Kubernetes which is -[maintained and commercially supported](https://access.redhat.com/support/policy/updates/openshift#ocp4) -by Red Hat. - -## Supported PostgreSQL versions - -PostgreSQL and EDB Postgres Advanced 13, 12, 11 and 10 are currently supported. - -## Main features - -* Direct integration with Kubernetes API server for High Availability, - without requiring an external tool -* Self-Healing capability, through: - * failover of the primary instance by promoting the most aligned replica - * automated recreation of a replica -* Planned switchover of the primary instance by promoting a selected replica -* Scale up/down capabilities -* Definition of an arbitrary number of instances (minimum 1 - one primary server) -* Definition of the *read-write* service, to connect your applications to the only primary server of the cluster -* Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads -* Support for Local Persistent Volumes with PVC templates -* Reuse of Persistent Volumes storage in Pods -* Rolling updates for PostgreSQL minor versions and operator upgrades -* TLS connections and client certificate authentication -* Continuous backup to an S3 compatible object store -* Full recovery and Point-In-Time recovery from an S3 compatible object store backup -* Support for Synchronous Replicas -* Support for node affinity via `nodeSelector` -* Standard output logging of PostgreSQL error messages - -## About this guide - -Follow the instructions in the ["Quickstart"](quickstart.md) to test Cloud Native PostgreSQL -on a local Kubernetes cluster using Minikube or Kind. 
- -In case you are not familiar with some basic terminology on Kubernetes and PostgreSQL, -please consult the ["Before you start" section](before_you_start.md). - -!!! Note - Although the guide primarily addresses Kubernetes, all concepts can - be extended to OpenShift as well. +# Cloud Native PostgreSQL + +**Cloud Native PostgreSQL** is an [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) +designed by [EnterpriseDB](https://www.enterprisedb.com) +to manage [PostgreSQL](https://www.postgresql.org/) workloads on any supported [Kubernetes](https://kubernetes.io) +cluster running in private, public, or hybrid cloud environments. +Cloud Native PostgreSQL adheres to DevOps principles and concepts +such as declarative configuration and immutable infrastructure. + +It defines a new Kubernetes resource called "Cluster" representing a PostgreSQL +cluster made up of a single primary and an optional number of replicas that co-exist +in a chosen Kubernetes namespace for High Availability and offloading of +read-only queries. + +Applications that reside in the same Kubernetes cluster can access the +PostgreSQL database using a service which is solely managed by the operator, +without having to worry about changes of the primary role following a failover +or a switchover. Applications that reside outside the Kubernetes cluster, need +to configure an Ingress object to expose the service via TCP. + +Cloud Native PostgreSQL works with PostgreSQL and [EDB Postgres Advanced](https://www.enterprisedb.com/products/edb-postgres-advanced-server-secure-ha-oracle-compatible) +and is available under the [EnterpriseDB Limited Use License](https://www.enterprisedb.com/limited-use-license). + +You can [evaluate Cloud Native PostgreSQL for free](evaluation.md). +You need a valid license key to use Cloud Native PostgreSQL in production. + +!!! 
IMPORTANT + Currently, based on the [Operator Capability Levels model](operator_capability_levels.md), + users can expect a **"Level III - Full Lifecycle"** set of capabilities from the + Cloud Native PostgreSQL Operator. + +## Requirements + +Cloud Native PostgreSQL requires Kubernetes 1.16 or higher, tested on AWS, Google, Azure (with multiple availability zones). + +Cloud Native PostgreSQL has also been certified for +[RedHat OpenShift Container Platform (OCP)](https://www.openshift.com/products/container-platform) +4.5+ and is available directly from the [RedHat Catalog](https://catalog.redhat.com/). +OpenShift Container Platform is an open-source distribution of Kubernetes which is +[maintained and commercially supported](https://access.redhat.com/support/policy/updates/openshift#ocp4) +by Red Hat. + +## Supported PostgreSQL versions + +PostgreSQL and EDB Postgres Advanced 13, 12, 11 and 10 are currently supported. + +## Main features + +* Direct integration with Kubernetes API server for High Availability, + without requiring an external tool +* Self-Healing capability, through: + * failover of the primary instance by promoting the most aligned replica + * automated recreation of a replica +* Planned switchover of the primary instance by promoting a selected replica +* Scale up/down capabilities +* Definition of an arbitrary number of instances (minimum 1 - one primary server) +* Definition of the *read-write* service, to connect your applications to the only primary server of the cluster +* Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads +* Support for Local Persistent Volumes with PVC templates +* Reuse of Persistent Volumes storage in Pods +* Rolling updates for PostgreSQL minor versions and operator upgrades +* TLS connections and client certificate authentication +* Continuous backup to an S3 compatible object store +* Full recovery and Point-In-Time recovery from an S3 compatible object 
store backup +* Support for Synchronous Replicas +* Support for node affinity via `nodeSelector` +* Standard output logging of PostgreSQL error messages + +## About this guide + +Follow the instructions in the ["Quickstart"](quickstart.md) to test Cloud Native PostgreSQL +on a local Kubernetes cluster using Minikube or Kind. + +In case you are not familiar with some basic terminology on Kubernetes and PostgreSQL, +please consult the ["Before you start" section](before_you_start.md). + +!!! Note + Although the guide primarily addresses Kubernetes, all concepts can + be extended to OpenShift as well. diff --git a/temp_kubernetes/original/src/installation.md b/temp_kubernetes/original/src/installation.md old mode 100755 new mode 100644 index 2b730fc9182..47964a141dc --- a/temp_kubernetes/original/src/installation.md +++ b/temp_kubernetes/original/src/installation.md @@ -1,78 +1,78 @@ -# Installation - -## Installation on Kubernetes - -The operator can be installed like any other resource in Kubernetes, -through a YAML manifest applied via `kubectl`. - -You can install the [latest operator manifest](samples/postgresql-operator-1.0.0.yaml) -as follows: - -```sh -kubectl apply -f \ - https://docs.enterprisedb.io/cloud-native-postgresql/latest/samples/postgresql-operator-1.0.0.yaml -``` - -Once you have run the `kubectl` command, Cloud Native PostgreSQL will be installed in your Kubernetes cluster. - -You can verify that with: - -```sh -kubectl get deploy -n postgresql-operator-system postgresql-operator-controller-manager -``` - -## Installation on Openshift - -### Via the web interface - -Log in to the console as `kubeadmin` and navigate to the `Operator → OperatorHub` page. - -Find the `Cloud Native PostgreSQL` box scrolling or using the search filter. - -Select the operator and click `Install`. Click `Install` again in the following -`Install Operator`, using the default settings. 
For an in-depth explanation of -those settings, see the [Openshift documentation](https://docs.openshift.com/container-platform/4.6/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster). - -The operator will soon be available in all the namespaces. - -Depending on the security levels applied to the OpenShift cluster you may be -required to create a proper set of roles and permissions for the operator to -be used in different namespaces. -For more information on this matter see the -[Openshift documentation](https://docs.openshift.com/container-platform/4.6/operators/understanding/olm/olm-understanding-operatorgroups.html). - -### Via the `oc` command line - -You can add the [`subscription`](samples/subscription.yaml) to install the operator in all the namespaces -as follows: - -```sh -oc apply -f \ - https://docs.enterprisedb.io/cloud-native-postgresql/latest/samples/subscription.yaml -``` - -The operator will soon be available in all the namespaces. - -More information on -[how to install operators via CLI](https://docs.openshift.com/container-platform/4.6/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-operator-from-operatorhub-using-cli_olm-adding-operators-to-a-cluster) -is available in the Openshift documentation. - -## Details about the deployment - -In Kubernetes, the operator is by default installed in the `postgresql-operator-system` namespace as a Kubernetes -`Deployment` called `postgresql-operator-controller-manager`. You can get more information by running: - -```sh -kubectl describe deploy -n postgresql-operator-system postgresql-operator-controller-manager -``` - -As any deployment, it sits on top of a replica set and supports rolling upgrades. -By default, we currently support only 1 replica. 
In future versions we plan to -support multiple replicas and leader election, as well as taints and tolerations -so to enable deployment on the Kubernetes control plane. - -In case the node where the pod is running is not reachable anymore, -the pod will be rescheduled on another node. - -As far as OpenShift is concerned, details might differ depending on the -selected installation method. +# Installation + +## Installation on Kubernetes + +The operator can be installed like any other resource in Kubernetes, +through a YAML manifest applied via `kubectl`. + +You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.1.0.yaml) +as follows: + +```sh +kubectl apply -f \ + https://get.enterprisedb.io/cnp/postgresql-operator-1.1.0.yaml +``` + +Once you have run the `kubectl` command, Cloud Native PostgreSQL will be installed in your Kubernetes cluster. + +You can verify that with: + +```sh +kubectl get deploy -n postgresql-operator-system postgresql-operator-controller-manager +``` + +## Installation on Openshift + +### Via the web interface + +Log in to the console as `kubeadmin` and navigate to the `Operator → OperatorHub` page. + +Find the `Cloud Native PostgreSQL` box scrolling or using the search filter. + +Select the operator and click `Install`. Click `Install` again in the following +`Install Operator`, using the default settings. For an in-depth explanation of +those settings, see the [Openshift documentation](https://docs.openshift.com/container-platform/4.6/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-from-operatorhub-using-web-console_olm-adding-operators-to-a-cluster). + +The operator will soon be available in all the namespaces. + +Depending on the security levels applied to the OpenShift cluster you may be +required to create a proper set of roles and permissions for the operator to +be used in different namespaces. 
+For more information on this matter see the +[Openshift documentation](https://docs.openshift.com/container-platform/4.6/operators/understanding/olm/olm-understanding-operatorgroups.html). + +### Via the `oc` command line + +You can add the [`subscription`](samples/subscription.yaml) to install the operator in all the namespaces +as follows: + +```sh +oc apply -f \ + https://docs.enterprisedb.io/cloud-native-postgresql/latest/samples/subscription.yaml +``` + +The operator will soon be available in all the namespaces. + +More information on +[how to install operators via CLI](https://docs.openshift.com/container-platform/4.6/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-operator-from-operatorhub-using-cli_olm-adding-operators-to-a-cluster) +is available in the Openshift documentation. + +## Details about the deployment + +In Kubernetes, the operator is by default installed in the `postgresql-operator-system` namespace as a Kubernetes +`Deployment` called `postgresql-operator-controller-manager`. You can get more information by running: + +```sh +kubectl describe deploy -n postgresql-operator-system postgresql-operator-controller-manager +``` + +As any deployment, it sits on top of a replica set and supports rolling upgrades. +By default, we currently support only 1 replica. In future versions we plan to +support multiple replicas and leader election, as well as taints and tolerations +so to enable deployment on the Kubernetes control plane. + +In case the node where the pod is running is not reachable anymore, +the pod will be rescheduled on another node. + +As far as OpenShift is concerned, details might differ depending on the +selected installation method. 
diff --git a/temp_kubernetes/original/src/kubernetes_upgrade.md b/temp_kubernetes/original/src/kubernetes_upgrade.md old mode 100755 new mode 100644 index 71fae8cf170..1c8ebe67e86 --- a/temp_kubernetes/original/src/kubernetes_upgrade.md +++ b/temp_kubernetes/original/src/kubernetes_upgrade.md @@ -1,86 +1,86 @@ -# Kubernetes Upgrade - -Kubernetes clusters must be kept updated. This becomes even more -important if you are self-managing your Kubernetes clusters, especially -on **bare metal**. - -Planning and executing regular updates is a way for your organization -to clean up the technical debt and reduce the business risks, despite -the introduction in your Kubernetes infrastructure of controlled -downtimes that temporarily take out a node from the cluster for -maintenance reasons (recommended reading: -["Embracing Risk"](https://landing.google.com/sre/sre-book/chapters/embracing-risk/) -from the Site Reliability Engineering book). - -For example, you might need to apply security updates on the Linux -servers where Kubernetes is installed, or to replace a malfunctioning -hardware component such as RAM, CPU, or RAID controller, or even upgrade -the cluster to the latest version of Kubernetes. - -Usually, maintenance operations in a cluster are performed one node -at a time by: - -1. evicting the workloads from the node to be updated (`drain`) -2. performing the actual operation (for example, system update) -3. re-joining the node to the cluster (`uncordon`) - -The above process requires workloads to be either stopped for the -entire duration of the upgrade or migrated on another node. - -While the latest case is the expected one in terms of service -reliability and self-healing capabilities of Kubernetes, there can -be situations where it is advised to operate with a temporarily -degraded cluster and wait for the upgraded node to be up again. 
- -In particular, if your PostgreSQL cluster relies on **node-local storage** -\- that is *storage which is local to the Kubernetes worker node where -the PostgreSQL database is running*. -Node-local storage (or simply *local storage*) is used to enhance performance. - -!!! Note - If your database files are on shared storage over the network, - you may not need to define a maintenance window. If the volumes currently - used by the pods can be reused by pods running on different nodes after - the drain, the default self-healing behavior of the operator will work - fine (you can then skip the rest of this section). - -When using local storage for PostgreSQL, you are advised to temporarily -put the cluster in **maintenance mode** through the `nodeMaintenanceWindow` -option to avoid standard self-healing procedures to kick in, -while, for example, enlarging the partition on the physical node or -updating the node itself. - -!!! Warning - Limit the duration of the maintenance window to the shortest - amount of time possible. In this phase, some of the expected - behaviors of Kubernetes are either disabled or running with - some limitations, including self-healing, rolling updates, - and Pod disruption budget. - -The `nodeMaintenanceWindow` option of the cluster has two further -settings: - -`inProgress`: -Boolean value that states if the maintenance window for the nodes -is currently in progress or not. By default, it is set to `off`. -During the maintenance window, the `reusePVC` option below is -evaluated by the operator. - -`reusePVC`: -Boolean value that defines if an existing PVC is reused or -not during the maintenance operation. By default, it is set to `on`. -When **enabled**, Kubernetes waits for the node to come up -again and then reuses the existing PVC; the `PodDisruptionBudget` -policy is temporarily removed. 
-When **disabled**, Kubernetes forces the recreation of the -Pod on a different node with a new PVC by relying on -PostgreSQL's physical streaming replication, then destroys -the old PVC together with the Pod. This scenario is generally -not recommended unless the database's size is small, and recloning -the new PostgreSQL instance takes shorter than waiting. - -!!! Note - When performing the `kubectl drain` command, you will need - to add the `--delete-local-data` option. - Don't be afraid: it refers to another volume internally used - by the operator - not the PostgreSQL data directory. +# Kubernetes Upgrade + +Kubernetes clusters must be kept updated. This becomes even more +important if you are self-managing your Kubernetes clusters, especially +on **bare metal**. + +Planning and executing regular updates is a way for your organization +to clean up the technical debt and reduce the business risks, despite +the introduction in your Kubernetes infrastructure of controlled +downtimes that temporarily take out a node from the cluster for +maintenance reasons (recommended reading: +["Embracing Risk"](https://landing.google.com/sre/sre-book/chapters/embracing-risk/) +from the Site Reliability Engineering book). + +For example, you might need to apply security updates on the Linux +servers where Kubernetes is installed, or to replace a malfunctioning +hardware component such as RAM, CPU, or RAID controller, or even upgrade +the cluster to the latest version of Kubernetes. + +Usually, maintenance operations in a cluster are performed one node +at a time by: + +1. evicting the workloads from the node to be updated (`drain`) +2. performing the actual operation (for example, system update) +3. re-joining the node to the cluster (`uncordon`) + +The above process requires workloads to be either stopped for the +entire duration of the upgrade or migrated on another node. 
+
+While the latter case is the expected one in terms of service
+reliability and self-healing capabilities of Kubernetes, there can
+be situations where it is advised to operate with a temporarily
+degraded cluster and wait for the upgraded node to be up again.
+
+In particular, if your PostgreSQL cluster relies on **node-local storage**
+\- that is *storage which is local to the Kubernetes worker node where
+the PostgreSQL database is running*.
+Node-local storage (or simply *local storage*) is used to enhance performance.
+
+!!! Note
+    If your database files are on shared storage over the network,
+    you may not need to define a maintenance window. If the volumes currently
+    used by the pods can be reused by pods running on different nodes after
+    the drain, the default self-healing behavior of the operator will work
+    fine (you can then skip the rest of this section).
+
+When using local storage for PostgreSQL, you are advised to temporarily
+put the cluster in **maintenance mode** through the `nodeMaintenanceWindow`
+option to avoid standard self-healing procedures to kick in,
+while, for example, enlarging the partition on the physical node or
+updating the node itself.
+
+!!! Warning
+    Limit the duration of the maintenance window to the shortest
+    amount of time possible. In this phase, some of the expected
+    behaviors of Kubernetes are either disabled or running with
+    some limitations, including self-healing, rolling updates,
+    and Pod disruption budget.
+
+The `nodeMaintenanceWindow` option of the cluster has two further
+settings:
+
+`inProgress`:
+Boolean value that states if the maintenance window for the nodes
+is currently in progress or not. By default, it is set to `off`.
+During the maintenance window, the `reusePVC` option below is
+evaluated by the operator.
+
+`reusePVC`:
+Boolean value that defines if an existing PVC is reused or
+not during the maintenance operation. By default, it is set to `on`.
+When **enabled**, Kubernetes waits for the node to come up +again and then reuses the existing PVC; the `PodDisruptionBudget` +policy is temporarily removed. +When **disabled**, Kubernetes forces the recreation of the +Pod on a different node with a new PVC by relying on +PostgreSQL's physical streaming replication, then destroys +the old PVC together with the Pod. This scenario is generally +not recommended unless the database's size is small, and re-cloning +the new PostgreSQL instance takes shorter than waiting. + +!!! Note + When performing the `kubectl drain` command, you will need + to add the `--delete-local-data` option. + Don't be afraid: it refers to another volume internally used + by the operator - not the PostgreSQL data directory. diff --git a/temp_kubernetes/original/src/license_keys.md b/temp_kubernetes/original/src/license_keys.md old mode 100755 new mode 100644 index 187df0c3c32..a88782dc74d --- a/temp_kubernetes/original/src/license_keys.md +++ b/temp_kubernetes/original/src/license_keys.md @@ -1,37 +1,37 @@ -# License and License Keys - -Each `Cluster` resource has a `licenseKey` parameter in its definition. - -A `licenseKey` is always required for the operator to work. - -The only exception is when you run the operator with Community PostgreSQL: -in this case, if the `licenseKey` parameter is unset, a cluster will be -started with the default trial license - which automatically expires after 30 days. - -!!! Important - After the license expiration, the operator will cease any reconciliation attempt - on the cluster, effectively stopping to manage its status. - The pods and the data will still be available. - -You can find the expiration date, as well as more information about the license, -in the cluster status: - -```sh -kubectl get cluster cluster_example -o yaml -[...] -status: - [...] - licenseStatus: - licenseExpiration: "2021-11-06T09:36:02Z" - licenseStatus: Trial - valid: true -[...] 
-``` - -A cluster license key can be updated with a new one at any moment, to extend -the expiration date or move the cluster to a production license. - -Cloud Native PostgreSQL is distributed under the EnterpriseDB Limited Usage License -Agreement, available at [enterprisedb.com/limited-use-license](https://www.enterprisedb.com/limited-use-license). - -Cloud Native PostgreSQL: Copyright (C) 2019-2020 EnterpriseDB. +# License and License Keys + +Each `Cluster` resource has a `licenseKey` parameter in its definition. + +A `licenseKey` is always required for the operator to work. + +The only exception is when you run the operator with Community PostgreSQL: +in this case, if the `licenseKey` parameter is unset, a cluster will be +started with the default trial license - which automatically expires after 30 days. + +!!! Important + After the license expiration, the operator will cease any reconciliation attempt + on the cluster, effectively stopping to manage its status. + The pods and the data will still be available. + +You can find the expiration date, as well as more information about the license, +in the cluster status: + +```sh +kubectl get cluster cluster_example -o yaml +[...] +status: + [...] + licenseStatus: + licenseExpiration: "2021-11-06T09:36:02Z" + licenseStatus: Trial + valid: true +[...] +``` + +A cluster license key can be updated with a new one at any moment, to extend +the expiration date or move the cluster to a production license. + +Cloud Native PostgreSQL is distributed under the EnterpriseDB Limited Usage License +Agreement, available at [enterprisedb.com/limited-use-license](https://www.enterprisedb.com/limited-use-license). + +Cloud Native PostgreSQL: Copyright (C) 2019-2020 EnterpriseDB. 
diff --git a/temp_kubernetes/original/src/operator_capability_levels.md b/temp_kubernetes/original/src/operator_capability_levels.md old mode 100755 new mode 100644 index 986b8c8cdfe..04007a8092d --- a/temp_kubernetes/original/src/operator_capability_levels.md +++ b/temp_kubernetes/original/src/operator_capability_levels.md @@ -1,396 +1,396 @@ -# Operator Capability Levels - -This section provides a summary of the capabilities implemented by Cloud Native PostgreSQL, -classified using the -["Operator SDK definition of Capability Levels"](https://sdk.operatorframework.io/docs/advanced-topics/operator-capabilities/operator-capabilities/) -framework. - -![Operator Capability Levels](./images/operator-capability-level.png) - -Each capability level is associated with a certain set of management features the operator offers: - -1. Basic Install -2. Seamless Upgrades -3. Full Lifecycle -4. Deep Insights -5. Auto Pilot - -!!! Note - We consider this framework as a guide for future work and implementations in the operator. - -## Level 1 - Basic Install - -Capability level 1 involves **installation** and **configuration** of the -operator. This category includes usability and user experience -enhancements, such as improvements in how users interact with the -operator and a PostgreSQL cluster configuration. - -!!! Important - We consider **Information Security** part of this level. - -### Operator deployment via declarative configuration - -The operator is installed in a declarative way using a Kubernetes manifest -which defines 3 `CustomResourceDefinition` objects: `Cluster`, `Backup`, -`ScheduledBackup`. - -### PostgreSQL cluster deployment via declarative configuration - -A PostgreSQL cluster (operand) is defined using the `Cluster` custom resource -in a fully declarative way. The PostgreSQL version is determined by the -operand container image defined in the CR, which is automatically fetched -from the requested registry. 
When deploying an operand, the operator also -automatically creates the following resources: `Pod`, `Service`, `Secret`, -`ConfigMap`,`PersistentVolumeClaim`, `PodDisruptionBudget`, `ServiceAccount`, -`RoleBinding`, `Role`. - -### Override of operand images through the CRD - -The operator is designed to support any operand container image with -PostgreSQL inside. -By default, the operator uses the latest available minor -version of the latest stable major version supported by the PostgreSQL -Community and published on Quay.io by EnterpriseDB. -You can use any compatible image of PostgreSQL supporting the -primary/standby architecture directly by setting the `imageName` -attribute in the CR. The operator also supports `imagePullSecretsNames` -to access private container registries. - -### Self-contained instance manager - -Instead of relying on an external tool such as Patroni or Stolon to -coordinate PostgreSQL instances in the Kubernetes cluster pods, the operator -injects the operator executable inside each pod, in a file named -`/controller/manager`. The application is used to control the underlying -PostgreSQL instance and to reconcile the pod status with the instance itself -based on the PostgreSQL cluster topology. The instance manager also starts a -web server that is invoked by the `kubelet` for probes. Unix signals invoked -by the `kubelet` are filtered by the instance manager and, where appropriate, -forwarded to the `postmaster` process for fast and controlled reactions to -external events. The instance manager is written in Go and has no external -dependencies. - -### Storage configuration - -Storage is a critical component in a database workload. Taking advantage of -Kubernetes native capabilities and resources in terms of storage, the -operator gives users enough flexibility to choose the right storage for their -workload requirements, based on what the underlying Kubernetes environment -can offer. 
This implies choosing a particular storage class in -a public cloud environment or fine-tuning the generated PVC through a -PVC template in the CR's `storage` parameter. - -### Replica configuration - -The operator automatically detects replicas in a cluster -through a single parameter called `instances`. If set to `1`, the cluster -comprises a single primary PostgreSQL instance with no replica. If higher -than `1`, the operator manages `instances -1` replicas, including high -availability through automated failover and rolling updates through -switchover operations. - -### Database configuration - -The operator is designed to manage a PostgreSQL cluster with a single -database. The operator transparently manages access to the database through -two Kubernetes services automatically provisioned and managed for read-write -and read-only workloads. -Using the convention over configuration approach, the operator creates a -database called `app`, by default owned by a regular Postgres user with the -same name. Both the database name and the user name can be specified if -required. -Although no configuration is required to run the cluster, users can customize -both PostgreSQL run-time configuration and PostgreSQL Host-Based -Authentication rules in the `postgresql` section of the CR. - -### Pod Security Policies - -For InfoSec requirements, the operator does not need privileged mode for the -execution of containers and access to volumes both in the operator and in the -operand. - -### License keys - -The operator comes with support for license keys, with the possibility to -programmatically define a default behavior in case of the absence of a key. -Cloud Native PostgreSQL has been programmed to create an implicit 30-day -trial license for every deployed cluster. -License keys are signed strings that the operator can verify using an -asymmetric key technique. 
The content is a JSON object that includes the -product, the cluster identifiers (namespace and name), the number of -instances, the expiration date, and, if required, the credentials to be used -as a secret by the operator to pull down an image from a protected container -registry. Beyond the expiration date, the operator will stop any -reconciliation process until the license key is restored. - -### Current status of the cluster - -The operator continuously updates the status section of the CR with the -observed status of the cluster. The entire PostgreSQL cluster status is -continuously monitored by the instance manager running in each pod: the -instance manager is responsible for applying the required changes to the -controlled PostgreSQL instance to converge to the required status of -the cluster (for example: if the cluster status reports that pod `-1` is the -primary, pod `-1` needs to promote itself while the other pods need to follow -pod `-1`). The same status is used by Kubernetes client applications to -provide details, including the OpenShift dashboard. - -### Operator's certification authority - -The operator automatically creates a certification authority for itself. -It creates and signs with the operator certification authority a leaf certificate -to be used by the webhook server, to ensure safe communication between the -Kubernetes API Server and the operator itself. - -### Cluster's certification authority - -The operator automatically creates a certification authority for every PostgreSQL -cluster, which is used to issue and renew TLS certificates for the authentication -of streaming replication standby servers and applications (instead of passwords). -The operator will use the Certification Authority to sign every cluster -certification authority. 
- -### TLS connections - -The operator transparently and natively supports TLS/SSL connections -to encrypt client/server communications for increased security using the -cluster's certification authority. - -### Certificate authentication for streaming replication - -The operator relies on TLS client certificate authentication to authorize streaming -replication connections from the standby servers, instead of relying on a password -(and therefore a secret). - -### Continuous configuration management - -The operator enables users to apply changes to the `Cluster` resource YAML -section of the PostgreSQL configuration and makes sure that all instances -are properly reloaded or restarted, depending on the configuration option. -*Current limitations:* changes with `ALTER SYSTEM` are not detected, meaning -that the cluster state is not enforced; proper restart order is not implemented -with [hot standby sensitive parameters](https://www.postgresql.org/docs/current/hot-standby.html#HOT-STANDBY-ADMIN) -such as `max_connections` and `max_wal_senders`. - -### Multiple installation methods - -The operator can be installed through a Kubernetes manifest via `kubectl -apply`, to be used in a traditional Kubernetes installation in public -and private cloud environments. Additionally, it can be deployed on OpenShift -Container Platform via OperatorHub. - -### Convention over configuration - -The operator supports the convention over configuration paradigm, deciding -standard default values while allowing users to override them and customize -them. You can specify a deployment of a PostgreSQL cluster using -the `Cluster` CRD in a couple of YAML code lines. - -## Level 2 - Seamless Upgrades - -Capability level 2 is about enabling **updates of the operator and the actual -workload**, in our case PostgreSQL servers. This includes **PostgreSQL minor -release updates** (security and bug fixes normally) as well as **major online -upgrades**. 
- -### Upgrade of the operator - -You can upgrade the operator seamlessly as a new deployment. A change in the -operator does not require a change in the operand - thanks to the instance -manager's injection. The operator can manage older versions of the operand. - -### Upgrade of the managed workload - -The operand can be upgraded using a declarative configuration approach as -part of changing the CR and, in particular, the `imageName` parameter. The -operator prevents major upgrades of PostgreSQL while making it possible to go -in both directions in terms of minor PostgreSQL releases within a major -version (enabling updates and rollbacks). - -In the presence of standby servers, the operator performs rolling updates -starting from the replicas by dropping the existing pod and creating a new -one with the new requested operand image that reuses the underlying storage. -Depending on the value of the `primaryUpdateStrategy`, the operator proceeds -with a switchover before updating the former primary (`unsupervised`) or waits -for the user to manually issue the switchover procedure (`supervised`). -Which setting to use depends on the business requirements as the operation -might generate some downtime for the applications, from a few seconds to -minutes based on the actual database workload. - -### Display cluster availability status during upgrade - -At any time, convey the cluster's high availability status, for example, `OK`, -`Failover in progress`, `Switchover in progress`, `Upgrade in progress`, or -`Upgrade failed`. - -## Level 3 - Full Lifecycle - -Capability level 3 requires the operator to manage aspects of **business -continuity** and **scalability**. -**Disaster recovery** is a business continuity component that requires -that both backup and recovery of a database work correctly. While as a -starting point, the goal is to achieve RPO < 5 minutes, the long term goal is -to implement RPO=0 backup solutions. 
**High Availability** is the other -important component of business continuity that, through PostgreSQL native -physical replication and hot standby replicas, allows the operator to perform -failover and switchover operations. This area includes enhancements in: - -- control of PostgreSQL physical replication, such as synchronous replication, - (cascading) replication clusters, and so on; -- connection pooling, to improve performance and control through a - connection pooling layer with pgBouncer. - -### PostgreSQL Backups - -The operator has been designed to provide application-level backups using -PostgreSQL’s native continuous backup technology based on -physical base backups and continuous WAL archiving. Specifically, -the operator currently supports only backups on AWS S3 or S3-compatible -object stores and gateways like MinIO. - -WAL archiving and base backups are defined at the cluster level, declaratively, -through the `backup` parameter in the cluster definition, by specifying -an S3 protocol destination URL (for example, to point to a specific folder in -an AWS S3 bucket) and, optionally, a generic endpoint URL. WAL archiving, -a prerequisite for continuous backup, does not require any further -action from the user: the operator will automatically and transparently set -the the `archive_command` to rely on `barman-cloud-wal-archive` to ship WAL -files to the defined endpoint. Users can decide the compression algorithm. - -You can define base backups in two ways: on-demand (through the `Backup` -custom resource definition) or scheduled (through the `ScheduledBackup` -customer resource definition, using a cron-like syntax). They both rely on -`barman-cloud-backup` for the job (distributed as part of the application -container image) to relay backups in the same endpoint, alongside WAL files. - -Both `barman-cloud-wal-restore` and `barman-cloud-backup` are distributed in -the application container image under GNU GPL 3 terms. 
- -### Full restore from a backup - -The operator enables users to bootstrap a new cluster (with its settings) -starting from an existing and accessible backup taken using -`barman-cloud-backup`. Once the bootstrap process is completed, the operator -initiates the instance in recovery mode and replays all available WAL files -from the specified archive, exiting recovery and starting as a primary. -Subsequently, the operator will clone the requested number of standby instances -from the primary. - -### Point-In-Time Recovery (PITR) from a backup - -The operator enables users to create a new PostgreSQL cluster by recovering -an existing backup to a specific point-in-time, defined with a timestamp, a -label or a transaction ID. This capability is built on top of the full restore -one and supports all the options available in -[PostgreSQL for PITR](https://www.postgresql.org/docs/13/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET). - -### Zero Data Loss clusters through synchronous replication - -Achieve *Zero Data Loss* (RPO=0) in your local High Availability Cloud Native PostgreSQL -cluster through quorum based synchronous replication support. The operator provides -two configuration options that control the minimum and maximum number of -expected synchronous standby replicas available at any time. The operator will -react accordingly, based on the number of available and ready PostgreSQL -instances in the cluster, through the following formula: - -``` -0 <= minSyncReplicas <= maxSyncReplicas < instances -``` - -### Liveness and readiness probes - -The operator defines liveness and readiness probes for the Postgres -Containers that are then invoked by the kubelet. They are mapped respectively -to the `/healthz` and `/readyz` endpoints of the web server managed -directly by the instance manager. They both use Go to connect to the cluster -and issue a simple query (`;`) to verify that the server is ready to accept -connections. 
- -### Rolling deployments - -The operator supports rolling deployments to minimize the downtime and, if a -PostgreSQL cluster is exposed publicly, the Service will load-balance the -read-only traffic only to available pods during the initialization or the -update. - -### Scale up and down of replicas - -The operator allows users to scale up and down the number of instances in a -PostgreSQL cluster. New replicas are automatically started up from the -primary server and will participate in the cluster's HA infrastructure. -The CRD declares a "scale" subresource that allows the user to use the -`kubectl scale` command. - -### Maintenance window and PodDisruptionBudget for Kubernetes nodes - -The operator creates a `PodDisruptionBudget` resource to limit the number of -concurrent disruptions to one. This configuration prevents the maintenance -operation from deleting all the pods in a cluster, allowing the specified -number of instances to be created. -The PodDisruptionBudget will be applied during the node draining operation, -preventing any disruption of the cluster service. - -While this strategy is correct for Kubernetes Clusters where -storage is shared among all the worker nodes, it may not be the best solution -for clusters using Local Storage or for clusters installed in a private -cloud. The operator allows users to specify a Maintenance Window and -configure the reaction to any underlying node eviction. The `ReusePVC` option -in the maintenance window section enables to specify the strategy to be used: -allocate new storage in a different PVC for the evicted instance or wait -for the underlying node to be available again. - -### Reuse of Persistent Volumes storage in Pods - -When the operator needs to create a pod that has been deleted by the user or -has been evicted by a Kubernetes maintenance operation, it reuses the -`PersistentVolumeClaim` if available, avoiding the need -to re-clone the data from the primary. 
- -### CPU and memory requests and limits - -The operator allows administrators to control and manage resource usage by -the cluster's pods, through the `resources` section of the manifest. In -particular `requests` and `limits` values can be set for both CPU and RAM. - -## Level 4 - Deep Insights - -Capability level 4 is about **observability**: in particular, monitoring, -alerting, trending, log processing. This might involve the use of external tools -such as Prometheus, Grafana, Fluent Bit, as well as extensions in the -PostgreSQL engine for the output of error logs directly in JSON format. - -### Prometheus exporter infrastructure - -The instance manager provides a pluggable framework and, via its own -web server, exposes an endpoint to export metrics for the -[Prometheus](https://prometheus.io/) monitoring and alerting tool. -Currently, only basic metrics and the `pg_stat_archiver` system view -for PostgreSQL have been implemented. - -### Kubernetes events - -Record major events as expected by the Kubernetes API, such as creating resources, -removing nodes, upgrading, and so on. Events can be displayed throught -the `kubectl describe` and `kubectl get events` command. - -## Level 5 - Auto Pilot - -Capability level 5 is focused on **automated scaling**, **healing** and -**tuning** - through the discovery of anomalies and insights emerged -from the observability layer. - -### Automated Failover for self-healing - -In case of detected failure on the primary, the operator will change the -status of the cluster by setting the most aligned replica as the new target -primary. As a consequence, the instance manager in each alive pod will -initiate the required procedures to align itself with the requested status of -the cluster, by either becoming the new primary or by following it. 
-In case the former primary comes back up, the same mechanism will avoid a -split-brain by preventing applications from reaching it, running `pg_rewind` on -the server and restarting it as a standby. - -### Automated recreation of a standby - -In case the pod hosting a standby has been removed, the operator initiates -the procedure to recreate a standby server. +# Operator Capability Levels + +This section provides a summary of the capabilities implemented by Cloud Native PostgreSQL, +classified using the +["Operator SDK definition of Capability Levels"](https://sdk.operatorframework.io/docs/advanced-topics/operator-capabilities/operator-capabilities/) +framework. + +![Operator Capability Levels](./images/operator-capability-level.png) + +Each capability level is associated with a certain set of management features the operator offers: + +1. Basic Install +2. Seamless Upgrades +3. Full Lifecycle +4. Deep Insights +5. Auto Pilot + +!!! Note + We consider this framework as a guide for future work and implementations in the operator. + +## Level 1 - Basic Install + +Capability level 1 involves **installation** and **configuration** of the +operator. This category includes usability and user experience +enhancements, such as improvements in how users interact with the +operator and a PostgreSQL cluster configuration. + +!!! Important + We consider **Information Security** part of this level. + +### Operator deployment via declarative configuration + +The operator is installed in a declarative way using a Kubernetes manifest +which defines 3 `CustomResourceDefinition` objects: `Cluster`, `Backup`, +`ScheduledBackup`. + +### PostgreSQL cluster deployment via declarative configuration + +A PostgreSQL cluster (operand) is defined using the `Cluster` custom resource +in a fully declarative way. The PostgreSQL version is determined by the +operand container image defined in the CR, which is automatically fetched +from the requested registry. 
When deploying an operand, the operator also +automatically creates the following resources: `Pod`, `Service`, `Secret`, +`ConfigMap`,`PersistentVolumeClaim`, `PodDisruptionBudget`, `ServiceAccount`, +`RoleBinding`, `Role`. + +### Override of operand images through the CRD + +The operator is designed to support any operand container image with +PostgreSQL inside. +By default, the operator uses the latest available minor +version of the latest stable major version supported by the PostgreSQL +Community and published on Quay.io by EnterpriseDB. +You can use any compatible image of PostgreSQL supporting the +primary/standby architecture directly by setting the `imageName` +attribute in the CR. The operator also supports `imagePullSecretsNames` +to access private container registries. + +### Self-contained instance manager + +Instead of relying on an external tool such as Patroni or Stolon to +coordinate PostgreSQL instances in the Kubernetes cluster pods, the operator +injects the operator executable inside each pod, in a file named +`/controller/manager`. The application is used to control the underlying +PostgreSQL instance and to reconcile the pod status with the instance itself +based on the PostgreSQL cluster topology. The instance manager also starts a +web server that is invoked by the `kubelet` for probes. Unix signals invoked +by the `kubelet` are filtered by the instance manager and, where appropriate, +forwarded to the `postgres` process for fast and controlled reactions to +external events. The instance manager is written in Go and has no external +dependencies. + +### Storage configuration + +Storage is a critical component in a database workload. Taking advantage of +Kubernetes native capabilities and resources in terms of storage, the +operator gives users enough flexibility to choose the right storage for their +workload requirements, based on what the underlying Kubernetes environment +can offer. 
This implies choosing a particular storage class in +a public cloud environment or fine-tuning the generated PVC through a +PVC template in the CR's `storage` parameter. + +### Replica configuration + +The operator automatically detects replicas in a cluster +through a single parameter called `instances`. If set to `1`, the cluster +comprises a single primary PostgreSQL instance with no replica. If higher +than `1`, the operator manages `instances -1` replicas, including high +availability through automated failover and rolling updates through +switchover operations. + +### Database configuration + +The operator is designed to manage a PostgreSQL cluster with a single +database. The operator transparently manages access to the database through +two Kubernetes services automatically provisioned and managed for read-write +and read-only workloads. +Using the convention over configuration approach, the operator creates a +database called `app`, by default owned by a regular Postgres user with the +same name. Both the database name and the user name can be specified if +required. +Although no configuration is required to run the cluster, users can customize +both PostgreSQL run-time configuration and PostgreSQL Host-Based +Authentication rules in the `postgresql` section of the CR. + +### Pod Security Policies + +For InfoSec requirements, the operator does not need privileged mode for the +execution of containers and access to volumes both in the operator and in the +operand. + +### License keys + +The operator comes with support for license keys, with the possibility to +programmatically define a default behavior in case of the absence of a key. +Cloud Native PostgreSQL has been programmed to create an implicit 30-day +trial license for every deployed cluster. +License keys are signed strings that the operator can verify using an +asymmetric key technique. 
The content is a JSON object that includes the +product, the cluster identifiers (namespace and name), the number of +instances, the expiration date, and, if required, the credentials to be used +as a secret by the operator to pull down an image from a protected container +registry. Beyond the expiration date, the operator will stop any +reconciliation process until the license key is restored. + +### Current status of the cluster + +The operator continuously updates the status section of the CR with the +observed status of the cluster. The entire PostgreSQL cluster status is +continuously monitored by the instance manager running in each pod: the +instance manager is responsible for applying the required changes to the +controlled PostgreSQL instance to converge to the required status of +the cluster (for example: if the cluster status reports that pod `-1` is the +primary, pod `-1` needs to promote itself while the other pods need to follow +pod `-1`). The same status is used by Kubernetes client applications to +provide details, including the OpenShift dashboard. + +### Operator's certification authority + +The operator automatically creates a certification authority for itself. +It creates and signs with the operator certification authority a leaf certificate +to be used by the webhook server, to ensure safe communication between the +Kubernetes API Server and the operator itself. + +### Cluster's certification authority + +The operator automatically creates a certification authority for every PostgreSQL +cluster, which is used to issue and renew TLS certificates for the authentication +of streaming replication standby servers and applications (instead of passwords). +The operator will use the Certification Authority to sign every cluster +certification authority. 
+ +### TLS connections + +The operator transparently and natively supports TLS/SSL connections +to encrypt client/server communications for increased security using the +cluster's certification authority. + +### Certificate authentication for streaming replication + +The operator relies on TLS client certificate authentication to authorize streaming +replication connections from the standby servers, instead of relying on a password +(and therefore a secret). + +### Continuous configuration management + +The operator enables users to apply changes to the `Cluster` resource YAML +section of the PostgreSQL configuration and makes sure that all instances +are properly reloaded or restarted, depending on the configuration option. +*Current limitations:* changes with `ALTER SYSTEM` are not detected, meaning +that the cluster state is not enforced; proper restart order is not implemented +with [hot standby sensitive parameters](https://www.postgresql.org/docs/current/hot-standby.html#HOT-STANDBY-ADMIN) +such as `max_connections` and `max_wal_senders`. + +### Multiple installation methods + +The operator can be installed through a Kubernetes manifest via `kubectl +apply`, to be used in a traditional Kubernetes installation in public +and private cloud environments. Additionally, it can be deployed on OpenShift +Container Platform via OperatorHub. + +### Convention over configuration + +The operator supports the convention over configuration paradigm, deciding +standard default values while allowing users to override them and customize +them. You can specify a deployment of a PostgreSQL cluster using +the `Cluster` CRD in a couple of YAML code lines. + +## Level 2 - Seamless Upgrades + +Capability level 2 is about enabling **updates of the operator and the actual +workload**, in our case PostgreSQL servers. This includes **PostgreSQL minor +release updates** (security and bug fixes normally) as well as **major online +upgrades**. 
+ +### Upgrade of the operator + +You can upgrade the operator seamlessly as a new deployment. A change in the +operator does not require a change in the operand - thanks to the instance +manager's injection. The operator can manage older versions of the operand. + +### Upgrade of the managed workload + +The operand can be upgraded using a declarative configuration approach as +part of changing the CR and, in particular, the `imageName` parameter. The +operator prevents major upgrades of PostgreSQL while making it possible to go +in both directions in terms of minor PostgreSQL releases within a major +version (enabling updates and rollbacks). + +In the presence of standby servers, the operator performs rolling updates +starting from the replicas by dropping the existing pod and creating a new +one with the new requested operand image that reuses the underlying storage. +Depending on the value of the `primaryUpdateStrategy`, the operator proceeds +with a switchover before updating the former primary (`unsupervised`) or waits +for the user to manually issue the switchover procedure (`supervised`). +Which setting to use depends on the business requirements as the operation +might generate some downtime for the applications, from a few seconds to +minutes based on the actual database workload. + +### Display cluster availability status during upgrade + +At any time, convey the cluster's high availability status, for example, `OK`, +`Failover in progress`, `Switchover in progress`, `Upgrade in progress`, or +`Upgrade failed`. + +## Level 3 - Full Lifecycle + +Capability level 3 requires the operator to manage aspects of **business +continuity** and **scalability**. +**Disaster recovery** is a business continuity component that requires +that both backup and recovery of a database work correctly. While as a +starting point, the goal is to achieve RPO < 5 minutes, the long term goal is +to implement RPO=0 backup solutions. 
**High Availability** is the other +important component of business continuity that, through PostgreSQL native +physical replication and hot standby replicas, allows the operator to perform +failover and switchover operations. This area includes enhancements in: + +- control of PostgreSQL physical replication, such as synchronous replication, + (cascading) replication clusters, and so on; +- connection pooling, to improve performance and control through a + connection pooling layer with pgBouncer. + +### PostgreSQL Backups + +The operator has been designed to provide application-level backups using +PostgreSQL’s native continuous backup technology based on +physical base backups and continuous WAL archiving. Specifically, +the operator currently supports only backups on AWS S3 or S3-compatible +object stores and gateways like MinIO. + +WAL archiving and base backups are defined at the cluster level, declaratively, +through the `backup` parameter in the cluster definition, by specifying +an S3 protocol destination URL (for example, to point to a specific folder in +an AWS S3 bucket) and, optionally, a generic endpoint URL. WAL archiving, +a prerequisite for continuous backup, does not require any further +action from the user: the operator will automatically and transparently set +the `archive_command` to rely on `barman-cloud-wal-archive` to ship WAL +files to the defined endpoint. Users can decide the compression algorithm. + +You can define base backups in two ways: on-demand (through the `Backup` +custom resource definition) or scheduled (through the `ScheduledBackup` +custom resource definition, using a cron-like syntax). They both rely on +`barman-cloud-backup` for the job (distributed as part of the application +container image) to relay backups in the same endpoint, alongside WAL files. + +Both `barman-cloud-wal-restore` and `barman-cloud-backup` are distributed in +the application container image under GNU GPL 3 terms. 
+ +### Full restore from a backup + +The operator enables users to bootstrap a new cluster (with its settings) +starting from an existing and accessible backup taken using +`barman-cloud-backup`. Once the bootstrap process is completed, the operator +initiates the instance in recovery mode and replays all available WAL files +from the specified archive, exiting recovery and starting as a primary. +Subsequently, the operator will clone the requested number of standby instances +from the primary. + +### Point-In-Time Recovery (PITR) from a backup + +The operator enables users to create a new PostgreSQL cluster by recovering +an existing backup to a specific point-in-time, defined with a timestamp, a +label or a transaction ID. This capability is built on top of the full restore +one and supports all the options available in +[PostgreSQL for PITR](https://www.postgresql.org/docs/13/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET). + +### Zero Data Loss clusters through synchronous replication + +Achieve *Zero Data Loss* (RPO=0) in your local High Availability Cloud Native PostgreSQL +cluster through quorum based synchronous replication support. The operator provides +two configuration options that control the minimum and maximum number of +expected synchronous standby replicas available at any time. The operator will +react accordingly, based on the number of available and ready PostgreSQL +instances in the cluster, through the following formula: + +``` +0 <= minSyncReplicas <= maxSyncReplicas < instances +``` + +### Liveness and readiness probes + +The operator defines liveness and readiness probes for the Postgres +Containers that are then invoked by the kubelet. They are mapped respectively +to the `/healthz` and `/readyz` endpoints of the web server managed +directly by the instance manager. They both use Go to connect to the cluster +and issue a simple query (`;`) to verify that the server is ready to accept +connections. 
+
+### Rolling deployments
+
+The operator supports rolling deployments to minimize the downtime and, if a
+PostgreSQL cluster is exposed publicly, the Service will load-balance the
+read-only traffic only to available pods during the initialization or the
+update.
+
+### Scale up and down of replicas
+
+The operator allows users to scale up and down the number of instances in a
+PostgreSQL cluster. New replicas are automatically started up from the
+primary server and will participate in the cluster's HA infrastructure.
+The CRD declares a "scale" subresource that allows the user to use the
+`kubectl scale` command.
+
+### Maintenance window and PodDisruptionBudget for Kubernetes nodes
+
+The operator creates a `PodDisruptionBudget` resource to limit the number of
+concurrent disruptions to one. This configuration prevents the maintenance
+operation from deleting all the pods in a cluster, allowing the specified
+number of instances to be created.
+The PodDisruptionBudget will be applied during the node draining operation,
+preventing any disruption of the cluster service.
+
+While this strategy is correct for Kubernetes Clusters where
+storage is shared among all the worker nodes, it may not be the best solution
+for clusters using Local Storage or for clusters installed in a private
+cloud. The operator allows users to specify a Maintenance Window and
+configure the reaction to any underlying node eviction. The `ReusePVC` option
+in the maintenance window section enables users to specify the strategy to be used:
+allocate new storage in a different PVC for the evicted instance or wait
+for the underlying node to be available again.
+
+### Reuse of Persistent Volumes storage in Pods
+
+When the operator needs to create a pod that has been deleted by the user or
+has been evicted by a Kubernetes maintenance operation, it reuses the
+`PersistentVolumeClaim` if available, avoiding the need
+to re-clone the data from the primary.
+
+### CPU and memory requests and limits
+
+The operator allows administrators to control and manage resource usage by
+the cluster's pods, through the `resources` section of the manifest. In
+particular `requests` and `limits` values can be set for both CPU and RAM.
+
+## Level 4 - Deep Insights
+
+Capability level 4 is about **observability**: in particular, monitoring,
+alerting, trending, log processing. This might involve the use of external tools
+such as Prometheus, Grafana, Fluent Bit, as well as extensions in the
+PostgreSQL engine for the output of error logs directly in JSON format.
+
+### Prometheus exporter infrastructure
+
+The instance manager provides a pluggable framework and, via its own
+web server, exposes an endpoint to export metrics for the
+[Prometheus](https://prometheus.io/) monitoring and alerting tool.
+Currently, only basic metrics and the `pg_stat_archiver` system view
+for PostgreSQL have been implemented.
+
+### Kubernetes events
+
+Record major events as expected by the Kubernetes API, such as creating resources,
+removing nodes, upgrading, and so on. Events can be displayed through
+the `kubectl describe` and `kubectl get events` commands.
+
+## Level 5 - Auto Pilot
+
+Capability level 5 is focused on **automated scaling**, **healing** and
+**tuning** - through the discovery of anomalies and insights that emerged
+from the observability layer.
+
+### Automated Failover for self-healing
+
+In case of detected failure on the primary, the operator will change the
+status of the cluster by setting the most aligned replica as the new target
+primary. As a consequence, the instance manager in each alive pod will
+initiate the required procedures to align itself with the requested status of
+the cluster, by either becoming the new primary or by following it.
+In case the former primary comes back up, the same mechanism will avoid a +split-brain by preventing applications from reaching it, running `pg_rewind` on +the server and restarting it as a standby. + +### Automated recreation of a standby + +In case the pod hosting a standby has been removed, the operator initiates +the procedure to recreate a standby server. diff --git a/temp_kubernetes/original/src/postgresql_conf.md b/temp_kubernetes/original/src/postgresql_conf.md old mode 100755 new mode 100644 index c276085fb26..2ffe978cdf6 --- a/temp_kubernetes/original/src/postgresql_conf.md +++ b/temp_kubernetes/original/src/postgresql_conf.md @@ -1,227 +1,227 @@ -# PostgreSQL Configuration - -Users that are familiar with PostgreSQL are aware of the existence of the following two files -to configure an instance: - -- `postgresql.conf`: main run-time configuration file of PostgreSQL -- `pg_hba.conf`: clients authentication file - -Due to the concepts of declarative configuration and immutability of the PostgreSQL -containers, users are not allowed to directly touch those files. Configuration -is possible through the `postgresql` section of the `Cluster` resource definition -by defining custom `postgresql.conf` and `pg_hba.conf` settings via the -`parameters` and the `pg_hba` keys. -A reference for custom settings usage is included in the samples, see -[`cluster-example-custom.yaml`](samples/cluster-example-custom.yaml). - -These settings are the same across all instances. - -!!! Warning - **OpenShift users:** due to a current limitation of the OpenShift user interface, - it is possible to change PostgreSQL settings from the YAML pane only. - -## The `postgresql` section - -The PostgreSQL instance in the pod starts with a default `postgresql.conf` file, -to which these settings are automatically added: - -```text -listen_addresses = '*' -include custom.conf -``` - -The `custom.conf` file will contain the user-defined settings. 
Refer to the -PostgreSQL documentation for [more information on the available parameters](https://www.postgresql.org/docs/current/runtime-config.html). -The content of `custom.conf` is automatically generated and maintained by the -operator by applying the following sections in this order: - -- Global default parameters -- Default parameters that depend on the PostgreSQL major version -- User-provided parameters -- Fixed parameters - -The **global default parameters** are: - -```text -logging_collector = 'off' -max_parallel_workers = '32' -max_replication_slots = '32' -max_worker_processes = '32' -``` - -The **default parameters for PostgreSQL 13 or higher** are: - -```text -wal_keep_size = '512MB' -``` - -The **default parameters for PostgreSQL 10 to 12** are: - -```text -wal_keep_segments = '32' -``` - -The following parameters are **fixed** and exclusively controlled by the operator: - -```text -archive_command = '/controller/manager wal-archive %p' -archive_mode = 'on' -archive_timeout = '5min' -full_page_writes = 'on' -hot_standby = 'true' -listen_addresses = '*' -port = '5432' -ssl = 'on' -ssl_ca_file = '/tmp/ca.crt' -ssl_cert_file = '/tmp/server.crt' -ssl_key_file = '/tmp/server.key' -unix_socket_directories = '/var/run/postgresql' -wal_level = 'logical' -wal_log_hints = 'on' -``` - -Since the fixed parameters are added last, they can't be overridden by the -user via the YAML configuration. Those parameters are required for correct WAL -archiving and replication. - -### Replication settings - -The `primary_conninfo` and `recovery_target_timeline` parameters are managed -automatically by the operator according to the state of the instance in -the cluster. - -```text -primary_conninfo = 'host=cluster-example-rw user=postgres dbname=postgres' -recovery_target_timeline = 'latest' -``` - -## The `pg_hba` section - -`pg_hba` is a list of PostgreSQL Host Based Authentication rules -used to create the `pg_hba.conf` used by the pods. 
- -Since the first matching rule is used for authentication, the `pg_hba.conf` file -generated by the operator can be seen as composed of three sections: - -1. Fixed rules -2. User-defined rules -3. Default rules - -Fixed rules: - -```text -local all all peer - -hostssl postgres streaming_replica all cert clientcert=1 -hostssl replication streaming_replica all cert clientcert=1 -``` - -Default rules: - -```text -host all all all md5 -``` - -The resulting `pg_hba.conf` will look like this: - -```text -local all all peer - -hostssl postgres streaming_replica all cert clientcert=1 -hostssl replication streaming_replica all cert clientcert=1 - - - -host all all all md5 -``` - -Refer to the PostgreSQL documentation for [more information on `pg_hba.conf`](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html). - -## Changing configuration - -You can apply configuration changes by editing the `postgresql` section of -the `Cluster` resource. - -After the change, the cluster instances will immediately reload the -configuration to apply the changes. -If the change involves a parameter requiring a restart, the operator will -perform a rolling upgrade. - -## Fixed parameters - -Some PostgreSQL configuration parameters should be managed exclusively by the -operator. The operator prevents the user from setting them using a webhook. 
- -Users are not allowed to set the following configuration parameters in the -`postgresql` section: - -- `allow_system_table_mods` -- `archive_cleanup_command` -- `archive_command` -- `archive_mode` -- `archive_timeout` -- `bonjour_name` -- `bonjour` -- `cluster_name` -- `config_file` -- `data_directory` -- `data_sync_retry` -- `dynamic_shared_memory_type` -- `event_source` -- `external_pid_file` -- `full_page_writes` -- `hba_file` -- `hot_standby` -- `huge_pages` -- `ident_file` -- `jit_provider` -- `listen_addresses` -- `log_destination` -- `log_directory` -- `log_file_mode` -- `log_filename` -- `log_rotation_age` -- `log_rotation_size` -- `log_truncate_on_rotation` -- `logging_collector` -- `port` -- `primary_conninfo` -- `primary_slot_name` -- `promote_trigger_file` -- `recovery_end_command` -- `recovery_min_apply_delay` -- `recovery_target_action` -- `recovery_target_inclusive` -- `recovery_target_lsn` -- `recovery_target_name` -- `recovery_target_time` -- `recovery_target_timeline` -- `recovery_target_xid` -- `recovery_target` -- `restart_after_crash` -- `restore_command` -- `shared_memory_type` -- `ssl_ca_file` -- `ssl_cert_file` -- `ssl_ciphers` -- `ssl_crl_file` -- `ssl_dh_params_file` -- `ssl_ecdh_curve` -- `ssl_key_file` -- `ssl_max_protocol_version` -- `ssl_min_protocol_version` -- `ssl_passphrase_command_supports_reload` -- `ssl_passphrase_command` -- `ssl_prefer_server_ciphers` -- `ssl` -- `stats_temp_directory` -- `synchronous_standby_names` -- `syslog_facility` -- `syslog_ident` -- `syslog_sequence_numbers` -- `syslog_split_messages` -- `unix_socket_directories` -- `unix_socket_group` -- `unix_socket_permissions` -- `wal_level` -- `wal_log_hints` +# PostgreSQL Configuration + +Users that are familiar with PostgreSQL are aware of the existence of the following two files +to configure an instance: + +- `postgresql.conf`: main run-time configuration file of PostgreSQL +- `pg_hba.conf`: clients authentication file + +Due to the concepts of declarative 
configuration and immutability of the PostgreSQL +containers, users are not allowed to directly touch those files. Configuration +is possible through the `postgresql` section of the `Cluster` resource definition +by defining custom `postgresql.conf` and `pg_hba.conf` settings via the +`parameters` and the `pg_hba` keys. +A reference for custom settings usage is included in the samples, see +[`cluster-example-custom.yaml`](samples/cluster-example-custom.yaml). + +These settings are the same across all instances. + +!!! Warning + **OpenShift users:** due to a current limitation of the OpenShift user interface, + it is possible to change PostgreSQL settings from the YAML pane only. + +## The `postgresql` section + +The PostgreSQL instance in the pod starts with a default `postgresql.conf` file, +to which these settings are automatically added: + +```text +listen_addresses = '*' +include custom.conf +``` + +The `custom.conf` file will contain the user-defined settings. Refer to the +PostgreSQL documentation for [more information on the available parameters](https://www.postgresql.org/docs/current/runtime-config.html). 
+The content of `custom.conf` is automatically generated and maintained by the +operator by applying the following sections in this order: + +- Global default parameters +- Default parameters that depend on the PostgreSQL major version +- User-provided parameters +- Fixed parameters + +The **global default parameters** are: + +```text +logging_collector = 'off' +max_parallel_workers = '32' +max_replication_slots = '32' +max_worker_processes = '32' +``` + +The **default parameters for PostgreSQL 13 or higher** are: + +```text +wal_keep_size = '512MB' +``` + +The **default parameters for PostgreSQL 10 to 12** are: + +```text +wal_keep_segments = '32' +``` + +The following parameters are **fixed** and exclusively controlled by the operator: + +```text +archive_command = '/controller/manager wal-archive %p' +archive_mode = 'on' +archive_timeout = '5min' +full_page_writes = 'on' +hot_standby = 'true' +listen_addresses = '*' +port = '5432' +ssl = 'on' +ssl_ca_file = '/tmp/ca.crt' +ssl_cert_file = '/tmp/server.crt' +ssl_key_file = '/tmp/server.key' +unix_socket_directories = '/var/run/postgresql' +wal_level = 'logical' +wal_log_hints = 'on' +``` + +Since the fixed parameters are added last, they can't be overridden by the +user via the YAML configuration. Those parameters are required for correct WAL +archiving and replication. + +### Replication settings + +The `primary_conninfo` and `recovery_target_timeline` parameters are managed +automatically by the operator according to the state of the instance in +the cluster. + +```text +primary_conninfo = 'host=cluster-example-rw user=postgres dbname=postgres' +recovery_target_timeline = 'latest' +``` + +## The `pg_hba` section + +`pg_hba` is a list of PostgreSQL Host Based Authentication rules +used to create the `pg_hba.conf` used by the pods. + +Since the first matching rule is used for authentication, the `pg_hba.conf` file +generated by the operator can be seen as composed of three sections: + +1. Fixed rules +2. 
User-defined rules +3. Default rules + +Fixed rules: + +```text +local all all peer + +hostssl postgres streaming_replica all cert clientcert=1 +hostssl replication streaming_replica all cert clientcert=1 +``` + +Default rules: + +```text +host all all all md5 +``` + +The resulting `pg_hba.conf` will look like this: + +```text +local all all peer + +hostssl postgres streaming_replica all cert clientcert=1 +hostssl replication streaming_replica all cert clientcert=1 + + + +host all all all md5 +``` + +Refer to the PostgreSQL documentation for [more information on `pg_hba.conf`](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html). + +## Changing configuration + +You can apply configuration changes by editing the `postgresql` section of +the `Cluster` resource. + +After the change, the cluster instances will immediately reload the +configuration to apply the changes. +If the change involves a parameter requiring a restart, the operator will +perform a rolling upgrade. + +## Fixed parameters + +Some PostgreSQL configuration parameters should be managed exclusively by the +operator. The operator prevents the user from setting them using a webhook. 
+ +Users are not allowed to set the following configuration parameters in the +`postgresql` section: + +- `allow_system_table_mods` +- `archive_cleanup_command` +- `archive_command` +- `archive_mode` +- `archive_timeout` +- `bonjour_name` +- `bonjour` +- `cluster_name` +- `config_file` +- `data_directory` +- `data_sync_retry` +- `dynamic_shared_memory_type` +- `event_source` +- `external_pid_file` +- `full_page_writes` +- `hba_file` +- `hot_standby` +- `huge_pages` +- `ident_file` +- `jit_provider` +- `listen_addresses` +- `log_destination` +- `log_directory` +- `log_file_mode` +- `log_filename` +- `log_rotation_age` +- `log_rotation_size` +- `log_truncate_on_rotation` +- `logging_collector` +- `port` +- `primary_conninfo` +- `primary_slot_name` +- `promote_trigger_file` +- `recovery_end_command` +- `recovery_min_apply_delay` +- `recovery_target_action` +- `recovery_target_inclusive` +- `recovery_target_lsn` +- `recovery_target_name` +- `recovery_target_time` +- `recovery_target_timeline` +- `recovery_target_xid` +- `recovery_target` +- `restart_after_crash` +- `restore_command` +- `shared_memory_type` +- `ssl_ca_file` +- `ssl_cert_file` +- `ssl_ciphers` +- `ssl_crl_file` +- `ssl_dh_params_file` +- `ssl_ecdh_curve` +- `ssl_key_file` +- `ssl_max_protocol_version` +- `ssl_min_protocol_version` +- `ssl_passphrase_command_supports_reload` +- `ssl_passphrase_command` +- `ssl_prefer_server_ciphers` +- `ssl` +- `stats_temp_directory` +- `synchronous_standby_names` +- `syslog_facility` +- `syslog_ident` +- `syslog_sequence_numbers` +- `syslog_split_messages` +- `unix_socket_directories` +- `unix_socket_group` +- `unix_socket_permissions` +- `wal_level` +- `wal_log_hints` diff --git a/temp_kubernetes/original/src/quickstart.md b/temp_kubernetes/original/src/quickstart.md old mode 100755 new mode 100644 index f74e7fce331..0f04eab29f0 --- a/temp_kubernetes/original/src/quickstart.md +++ b/temp_kubernetes/original/src/quickstart.md @@ -1,171 +1,171 @@ -# Quickstart - -This 
section describes how to test a PostgreSQL cluster on your laptop/computer -using Cloud Native PostgreSQL on a local Kubernetes cluster in -[Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) or -[Kind](https://kind.sigs.k8s.io/). - -RedHat OpenShift Container Platform users can test the certified operator for -Cloud Native PostgreSQL on the [Red Hat CodeReady Containers (CRC)](https://developers.redhat.com/products/codeready-containers/overview) -for OpenShift. - -!!! Warning - The instructions contained in this section are for demonstration, - testing, and practice purposes only and must not be used in production. - -Like any other Kubernetes application, Cloud Native PostgreSQL is deployed using -regular manifests written in YAML. - -By following the instructions on this page you should be able to start a PostgreSQL -cluster on your local Kubernetes/Openshift installation and experiment with it. - -!!! Important - Make sure that you have `kubectl` installed on your machine in order - to connect to the Kubernetes cluster, or `oc` if using CRC for OpenShift. - Please follow the Kubernetes documentation on [how to install `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - or the Openshift one on [how to install `oc`](https://docs.openshift.com/container-platform/4.6/cli_reference/openshift_cli/getting-started-cli.html). - - -!!! Note - If you are running Openshift, use `oc` every time `kubectl` is mentioned - in this documentation. `kubectl` commands are compatible with `oc` ones. - -## Part 1 - Setup the local Kubernetes/Openshift playground - -The first part is about installing Minikube, Kind, or CRC. Please spend some time -reading about the systems and decide which one to proceed with. -After setting up one of them, please proceed with part 2. - -### Minikube - -Minikube is a tool that makes it easy to run Kubernetes locally. 
Minikube runs a -single-node Kubernetes cluster inside a Virtual Machine (VM) on your laptop for -users looking to try out Kubernetes or develop with it day-to-day. Normally, it -is used in conjunction with VirtualBox. - -You can find more information in the official [Kubernetes documentation on how to -install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube) in your local personal environment. -When you installed it, run the following command to create a minikube cluster: - -```sh -minikube start -``` - -This will create the Kubernetes cluster, and you will be ready to use it. -Verify that it works with the following command: - -```sh -kubectl get nodes -``` - -You will see one node called `minikube`. - -### Kind - -If you do not want to use a virtual machine hypervisor, then Kind is a tool for running -local Kubernetes clusters using Docker container "nodes" (Kind stands for "Kubernetes IN Docker" indeed). - -Install `kind` on your environment following the instructions in the [Quickstart](https://kind.sigs.k8s.io/docs/user/quick-start), -then create a Kubernetes cluster with: - -```sh -kind create cluster --name pg -``` - -### CodeReady Containers (CRC) - -[Download RedHat CRC](https://developers.redhat.com/products/codeready-containers/overview) -and move the binary inside a directory in your `PATH`. - -You can then run the following commands: -``` -crc setup -crc start -``` - -The `crc start` output will explain how to proceed. You'll then need to -execute the output of the `crc oc-env` command. -After that, you can log in as `kubeadmin` with the printed `oc login` -command. You can also open the web console running `crc console`. -User and password are the same as for the `oc login` command. - -CRC doesn't come with a StorageClass, so one has to be configured. -You can follow the [Dynamic volume provisioning wiki page](https://github.com/code-ready/crc/wiki/Dynamic-volume-provisioning) -and install `rancher/local-path-provisioner`. 
- -## Part 2 - Install Cloud Native PostgreSQL - -Now that you have a Kubernetes or OpenShift installation up and running -on your laptop, you can proceed with Cloud Native PostgreSQL installation. - -Please refer to the ["Installation"](installation.md) section and then proceed -with the deployment of a PostgreSQL cluster. - -## Part 3 - Deploy a PostgreSQL cluster - -As with any other deployment in Kubernetes, to deploy a PostgreSQL cluster -you need to apply a configuration file that defines your desired `Cluster`. - -The [`cluster-example.yaml`](samples/cluster-example.yaml) sample file -defines a simple `Cluster` using the default storage class to allocate -disk space: - -```yaml -# Example of PostgreSQL cluster -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example -spec: - instances: 3 - - # Example of rolling update strategy: - # - unsupervised: automated update of the primary once all - # replicas have been upgraded (default) - # - supervised: requires manual supervision to perform - # the switchover of the primary - primaryUpdateStrategy: unsupervised - - # Require 1Gi of space - storage: - size: 1Gi -``` - -!!! Note "There's more" - For more detailed information about the available options, please refer - to the ["API Reference" section](api_reference.md). - -In order to create the 3-node PostgreSQL cluster, you need to run the following command: - -```sh -kubectl apply -f cluster-example.yaml -``` - -You can check that the pods are being created with the `get pods` command: - -```sh -kubectl get pods -``` - -By default, the operator will install the latest available minor version -of the latest major version of PostgreSQL when the operator was released. -You can override this by setting the `imageName` key in the `spec` section of -the `Cluster` definition. For example, to install PostgreSQL 12.5: - -```yaml -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - # [...] -spec: - # [...] 
- imageName: quay.io/enterprisedb/postgresql:12.5 - #[...] -``` - -!!! Important - The immutable infrastructure paradigm requires that you always - point to a specific version of the container image. - Never use tags like `latest` or `13` in a production environment - as it might lead to unpredictable scenarios in terms of update - policies and version consistency in the cluster. +# Quickstart + +This section describes how to test a PostgreSQL cluster on your laptop/computer +using Cloud Native PostgreSQL on a local Kubernetes cluster in +[Minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) or +[Kind](https://kind.sigs.k8s.io/). + +RedHat OpenShift Container Platform users can test the certified operator for +Cloud Native PostgreSQL on the [Red Hat CodeReady Containers (CRC)](https://developers.redhat.com/products/codeready-containers/overview) +for OpenShift. + +!!! Warning + The instructions contained in this section are for demonstration, + testing, and practice purposes only and must not be used in production. + +Like any other Kubernetes application, Cloud Native PostgreSQL is deployed using +regular manifests written in YAML. + +By following the instructions on this page you should be able to start a PostgreSQL +cluster on your local Kubernetes/Openshift installation and experiment with it. + +!!! Important + Make sure that you have `kubectl` installed on your machine in order + to connect to the Kubernetes cluster, or `oc` if using CRC for OpenShift. + Please follow the Kubernetes documentation on [how to install `kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + or the Openshift one on [how to install `oc`](https://docs.openshift.com/container-platform/4.6/cli_reference/openshift_cli/getting-started-cli.html). + + +!!! Note + If you are running Openshift, use `oc` every time `kubectl` is mentioned + in this documentation. `kubectl` commands are compatible with `oc` ones. 
+ +## Part 1 - Setup the local Kubernetes/Openshift playground + +The first part is about installing Minikube, Kind, or CRC. Please spend some time +reading about the systems and decide which one to proceed with. +After setting up one of them, please proceed with part 2. + +### Minikube + +Minikube is a tool that makes it easy to run Kubernetes locally. Minikube runs a +single-node Kubernetes cluster inside a Virtual Machine (VM) on your laptop for +users looking to try out Kubernetes or develop with it day-to-day. Normally, it +is used in conjunction with VirtualBox. + +You can find more information in the official [Kubernetes documentation on how to +install Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube) in your local personal environment. +When you installed it, run the following command to create a minikube cluster: + +```sh +minikube start +``` + +This will create the Kubernetes cluster, and you will be ready to use it. +Verify that it works with the following command: + +```sh +kubectl get nodes +``` + +You will see one node called `minikube`. + +### Kind + +If you do not want to use a virtual machine hypervisor, then Kind is a tool for running +local Kubernetes clusters using Docker container "nodes" (Kind stands for "Kubernetes IN Docker" indeed). + +Install `kind` on your environment following the instructions in the [Quickstart](https://kind.sigs.k8s.io/docs/user/quick-start), +then create a Kubernetes cluster with: + +```sh +kind create cluster --name pg +``` + +### CodeReady Containers (CRC) + +[Download RedHat CRC](https://developers.redhat.com/products/codeready-containers/overview) +and move the binary inside a directory in your `PATH`. + +You can then run the following commands: +``` +crc setup +crc start +``` + +The `crc start` output will explain how to proceed. You'll then need to +execute the output of the `crc oc-env` command. +After that, you can log in as `kubeadmin` with the printed `oc login` +command. 
You can also open the web console running `crc console`. +User and password are the same as for the `oc login` command. + +CRC doesn't come with a StorageClass, so one has to be configured. +You can follow the [Dynamic volume provisioning wiki page](https://github.com/code-ready/crc/wiki/Dynamic-volume-provisioning) +and install `rancher/local-path-provisioner`. + +## Part 2 - Install Cloud Native PostgreSQL + +Now that you have a Kubernetes or OpenShift installation up and running +on your laptop, you can proceed with Cloud Native PostgreSQL installation. + +Please refer to the ["Installation"](installation.md) section and then proceed +with the deployment of a PostgreSQL cluster. + +## Part 3 - Deploy a PostgreSQL cluster + +As with any other deployment in Kubernetes, to deploy a PostgreSQL cluster +you need to apply a configuration file that defines your desired `Cluster`. + +The [`cluster-example.yaml`](samples/cluster-example.yaml) sample file +defines a simple `Cluster` using the default storage class to allocate +disk space: + +```yaml +# Example of PostgreSQL cluster +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + + # Example of rolling update strategy: + # - unsupervised: automated update of the primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # the switchover of the primary + primaryUpdateStrategy: unsupervised + + # Require 1Gi of space + storage: + size: 1Gi +``` + +!!! Note "There's more" + For more detailed information about the available options, please refer + to the ["API Reference" section](api_reference.md). 
+ +In order to create the 3-node PostgreSQL cluster, you need to run the following command: + +```sh +kubectl apply -f cluster-example.yaml +``` + +You can check that the pods are being created with the `get pods` command: + +```sh +kubectl get pods +``` + +By default, the operator will install the latest available minor version +of the latest major version of PostgreSQL when the operator was released. +You can override this by setting the `imageName` key in the `spec` section of +the `Cluster` definition. For example, to install PostgreSQL 12.5: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + # [...] +spec: + # [...] + imageName: quay.io/enterprisedb/postgresql:12.5 + #[...] +``` + +!!! Important + The immutable infrastructure paradigm requires that you always + point to a specific version of the container image. + Never use tags like `latest` or `13` in a production environment + as it might lead to unpredictable scenarios in terms of update + policies and version consistency in the cluster. diff --git a/temp_kubernetes/original/src/rolling_update.md b/temp_kubernetes/original/src/rolling_update.md old mode 100755 new mode 100644 index 118a0b1bcfc..57009b5aa7b --- a/temp_kubernetes/original/src/rolling_update.md +++ b/temp_kubernetes/original/src/rolling_update.md @@ -1,42 +1,42 @@ -# Rolling Updates - -The operator allows changing the PostgreSQL version used in a cluster while -applications are running against it. - -!!! Important - Only upgrades for PostgreSQL minor releases are supported. - -Rolling upgrades are started when: - -- the user changes the `imageName` attribute of the cluster specification; - -- after the operator is updated, to ensure the Pods run the latest instance - manager; - -- when a change in the PostgreSQL configuration requires a restart to be - applied. - -The operator starts upgrading all the replicas, one Pod at a time, starting -from the one with the highest serial. 
- -The primary is the last node to be upgraded. This operation is configurable and -managed by the `primaryUpdateStrategy` option, accepting these two values: - -- `switchover`: the rolling update process is managed by Kubernetes - and is entirely automated, with the *switchover* operation - starting once all the replicas have been upgraded -- `manual`: the rolling update process is suspended immediately - after all replicas have been upgraded and can only be completed - with a manual switchover triggered by an administrator with - `kubectl cnp promote [cluster] [pod]`. The plugin can be downloaded from the - [`kubectl-cnp` project page](https://github.com/EnterpriseDB/kubectl-cnp) - on GitHub. - -The default and recommended value is `switchover`. - -The upgrade keeps the Cloud Native PostgreSQL identity and does not -reclone the data. Pods will be deleted and created again with the same PVCs. - -During the rolling update procedure, the services endpoints move to reflect -the cluster's status, so the applications ignore the node that -is updating. +# Rolling Updates + +The operator allows changing the PostgreSQL version used in a cluster while +applications are running against it. + +!!! Important + Only upgrades for PostgreSQL minor releases are supported. + +Rolling upgrades are started when: + +- the user changes the `imageName` attribute of the cluster specification; + +- after the operator is updated, to ensure the Pods run the latest instance + manager; + +- when a change in the PostgreSQL configuration requires a restart to be + applied. + +The operator starts upgrading all the replicas, one Pod at a time, starting +from the one with the highest serial. + +The primary is the last node to be upgraded. 
This operation is configurable and +managed by the `primaryUpdateStrategy` option, accepting these two values: + +- `switchover`: the rolling update process is managed by Kubernetes + and is entirely automated, with the *switchover* operation + starting once all the replicas have been upgraded +- `manual`: the rolling update process is suspended immediately + after all replicas have been upgraded and can only be completed + with a manual switchover triggered by an administrator with + `kubectl cnp promote [cluster] [pod]`. The plugin can be downloaded from the + [`kubectl-cnp` project page](https://github.com/EnterpriseDB/kubectl-cnp) + on GitHub. + +The default and recommended value is `switchover`. + +The upgrade keeps the Cloud Native PostgreSQL identity and does not +re-clone the data. Pods will be deleted and created again with the same PVCs. + +During the rolling update procedure, the services endpoints move to reflect +the cluster's status, so the applications ignore the node that +is updating. 
diff --git a/temp_kubernetes/original/src/samples.md b/temp_kubernetes/original/src/samples.md old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/backup-example.yaml b/temp_kubernetes/original/src/samples/backup-example.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-example-custom.yaml b/temp_kubernetes/original/src/samples/cluster-example-custom.yaml old mode 100755 new mode 100644 index 1878cf7e402..49223affa68 --- a/temp_kubernetes/original/src/samples/cluster-example-custom.yaml +++ b/temp_kubernetes/original/src/samples/cluster-example-custom.yaml @@ -1,25 +1,28 @@ -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-custom -spec: - instances: 3 - - # Parameters and pg_hba configuration will be append - # to the default ones to make the cluster work - postgresql: - parameters: - max_worker_processes: "60" - pg_hba: - - host all all all trust - - # Example of rolling update strategy: - # - unsupervised: automated update of the primary once all - # replicas have been upgraded (default) - # - supervised: requires manual supervision to perform - # the switchover of the primary - primaryUpdateStrategy: unsupervised - - # Require 1Gi of space per instance using default storage class - storage: - size: 1Gi +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-custom +spec: + instances: 3 + + # Parameters and pg_hba configuration will be append + # to the default ones to make the cluster work + postgresql: + parameters: + max_worker_processes: "60" + pg_hba: + # To access through TCP/IP you will need to get username + # and password from the secret cluster-example-custom-app + - host all all all md5 + + + # Example of rolling update strategy: + # - unsupervised: automated update of the primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # 
the switchover of the primary + primaryUpdateStrategy: unsupervised + + # Require 1Gi of space per instance using default storage class + storage: + size: 1Gi diff --git a/temp_kubernetes/original/src/samples/cluster-example-epas.yaml b/temp_kubernetes/original/src/samples/cluster-example-epas.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-example-full.yaml b/temp_kubernetes/original/src/samples/cluster-example-full.yaml old mode 100755 new mode 100644 index bd644fa0f9f..71e497e2baf --- a/temp_kubernetes/original/src/samples/cluster-example-full.yaml +++ b/temp_kubernetes/original/src/samples/cluster-example-full.yaml @@ -1,104 +1,104 @@ -# Example of definition of a test cluster using all the elements available -# in the CRD. Please change values appropriately for your environment. -# Remember that you can take advantage of convention over configuration -# and normally you don't need to use all these definitions. - -apiVersion: v1 -data: - password: cGFzc3dvcmQ= -kind: Secret -metadata: - name: cluster-example-app-user -type: kubernetes.io/basic-auth ---- -apiVersion: v1 -data: - password: cGFzc3dvcmQ= -kind: Secret -metadata: - name: cluster-example-superuser -type: kubernetes.io/basic-auth ---- -apiVersion: v1 -kind: Secret -metadata: - name: backup-creds -data: - ACCESS_KEY_ID: a2V5X2lk - ACCESS_SECRET_KEY: c2VjcmV0X2tleQ== ---- -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-full -spec: - description: "Example of cluster" - imageName: quay.io/enterprisedb/postgresql:13.1 - # imagePullSecret is only required if the images are located in a private registry - # imagePullSecrets: - # - name: private_registry_access - instances: 3 - #licenseKey: insert_valid_license_here - startDelay: 300 - stopDelay: 300 - primaryUpdateStrategy: unsupervised - - postgresql: - parameters: - shared_buffers: 256MB - pg_hba: - - host all all 10.244.0.0/16 md5 - - bootstrap: - initdb: - 
database: appdb - owner: appuser - secret: - name: cluster-example-app-user - # Alternative bootstrap method: start from a backup - #recovery: - # backup: - # name: backup-example - - superuserSecret: - name: cluster-example-superuser - - storage: - storageClass: standard - size: 1Gi - - backup: - barmanObjectStore: - destinationPath: s3://cluster-example-full-backup/ - endpointURL: http://custom-endpoint:1234 - s3Credentials: - accessKeyId: - name: backup-creds - key: ACCESS_KEY_ID - secretAccessKey: - name: backup-creds - key: ACCESS_SECRET_KEY - wal: - compression: gzip - encryption: AES256 - data: - compression: gzip - encryption: AES256 - immediateCheckpoint: false - jobs: 2 - - resources: - requests: - memory: "512Mi" - cpu: "1" - limits: - memory: "1Gi" - cpu: "2" - - affinity: - enablePodAntiAffinity: true - topologyKey: failure-domain.beta.kubernetes.io/zone - - nodeMaintenanceWindow: - inProgress: false - reusePVC: false +# Example of definition of a test cluster using all the elements available +# in the CRD. Please change values appropriately for your environment. +# Remember that you can take advantage of convention over configuration +# and normally you don't need to use all these definitions. 
+ +apiVersion: v1 +data: + password: cGFzc3dvcmQ= +kind: Secret +metadata: + name: cluster-example-app-user +type: kubernetes.io/basic-auth +--- +apiVersion: v1 +data: + password: cGFzc3dvcmQ= +kind: Secret +metadata: + name: cluster-example-superuser +type: kubernetes.io/basic-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: backup-creds +data: + ACCESS_KEY_ID: a2V5X2lk + ACCESS_SECRET_KEY: c2VjcmV0X2tleQ== +--- +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-full +spec: + description: "Example of cluster" + imageName: quay.io/enterprisedb/postgresql:13.2 + # imagePullSecret is only required if the images are located in a private registry + # imagePullSecrets: + # - name: private_registry_access + instances: 3 + #licenseKey: insert_valid_license_here + startDelay: 300 + stopDelay: 300 + primaryUpdateStrategy: unsupervised + + postgresql: + parameters: + shared_buffers: 256MB + pg_hba: + - host all all 10.244.0.0/16 md5 + + bootstrap: + initdb: + database: appdb + owner: appuser + secret: + name: cluster-example-app-user + # Alternative bootstrap method: start from a backup + #recovery: + # backup: + # name: backup-example + + superuserSecret: + name: cluster-example-superuser + + storage: + storageClass: standard + size: 1Gi + + backup: + barmanObjectStore: + destinationPath: s3://cluster-example-full-backup/ + endpointURL: http://custom-endpoint:1234 + s3Credentials: + accessKeyId: + name: backup-creds + key: ACCESS_KEY_ID + secretAccessKey: + name: backup-creds + key: ACCESS_SECRET_KEY + wal: + compression: gzip + encryption: AES256 + data: + compression: gzip + encryption: AES256 + immediateCheckpoint: false + jobs: 2 + + resources: + requests: + memory: "512Mi" + cpu: "1" + limits: + memory: "1Gi" + cpu: "2" + + affinity: + enablePodAntiAffinity: true + topologyKey: failure-domain.beta.kubernetes.io/zone + + nodeMaintenanceWindow: + inProgress: false + reusePVC: false diff --git 
a/temp_kubernetes/original/src/samples/cluster-example-initdb.yaml b/temp_kubernetes/original/src/samples/cluster-example-initdb.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-example-secret.yaml b/temp_kubernetes/original/src/samples/cluster-example-secret.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-example-syncreplicas.yaml b/temp_kubernetes/original/src/samples/cluster-example-syncreplicas.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-example.yaml b/temp_kubernetes/original/src/samples/cluster-example.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-expose-service.yaml b/temp_kubernetes/original/src/samples/cluster-expose-service.yaml old mode 100755 new mode 100644 index b9aa761bb3c..201b25e1643 --- a/temp_kubernetes/original/src/samples/cluster-expose-service.yaml +++ b/temp_kubernetes/original/src/samples/cluster-expose-service.yaml @@ -1,36 +1,36 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: tcp-services - namespace: ingress-nginx -data: - 5432: default/cluster-example-lead-primary:5432 - ---- -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx - namespace: ingress-nginx - labels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx -spec: - type: LoadBalancer - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - protocol: TCP - - name: postgres - port: 5432 - targetPort: 5432 - protocol: TCP - selector: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tcp-services + namespace: ingress-nginx +data: + 5432: default/cluster-example-rw:5432 + +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + type: LoadBalancer + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP + - name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/temp_kubernetes/original/src/samples/cluster-pvc-template.yaml b/temp_kubernetes/original/src/samples/cluster-pvc-template.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-restore-pitr.yaml b/temp_kubernetes/original/src/samples/cluster-restore-pitr.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-restore.yaml b/temp_kubernetes/original/src/samples/cluster-restore.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-storage-class-with-backup.yaml b/temp_kubernetes/original/src/samples/cluster-storage-class-with-backup.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/cluster-storage-class.yaml b/temp_kubernetes/original/src/samples/cluster-storage-class.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/postgresql-operator-0.3.0.yaml b/temp_kubernetes/original/src/samples/postgresql-operator-0.3.0.yaml deleted file mode 100755 index 3183d014ed0..00000000000 --- a/temp_kubernetes/original/src/samples/postgresql-operator-0.3.0.yaml +++ /dev/null @@ -1,897 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: 
Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupSpec defines the desired state of Backup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - type: object - status: - description: BackupStatus defines the observed state of Backup - properties: - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - scope: Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - validation: - openAPIV3Schema: - description: Cluster is the Schema for the postgresqls API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec defines the desired state of Cluster - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - applicationConfiguration: - description: Configuration from the application point of view - properties: - database: - description: Name of the database used by the application - minLength: 1 - type: string - owner: - description: Name of the owner of the database in the instance to - be used by applications. - minLength: 1 - type: string - required: - - database - - owner - type: object - backup: - description: The configuration to be used for backups - properties: - data: - description: The configuration to be used to backup the data files - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - immediateCheckpoint: - description: Whenever to force the initial checkpoint to be - done as quickly as possible - type: boolean - jobs: - description: The number of jobs to be used to upload the backup, - defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if - this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? 
- type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - anyOf: - - type: integer - - type: string - description: Size of the storage. Required if not already specified - in the PVC template. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - storageClass: - description: StorageClass to use for database data (PGDATA). Applied - after evaluating the PVC template, if available. 
If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - required: - - applicationConfiguration - - instances - type: object - status: - description: ClusterStatus defines the observed state of Cluster - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this bdrGroup and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ScheduledBackupSpec defines the desired state of ScheduledBackup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: ScheduledBackupStatus defines the observed state of ScheduledBackup - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups/status - verbs: - - get - - patch - 
- update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.3.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.3.0 - name: manager - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - 
imagePullSecrets: - - name: postgresql-operator-pull-secret - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 diff --git a/temp_kubernetes/original/src/samples/postgresql-operator-0.4.0.yaml b/temp_kubernetes/original/src/samples/postgresql-operator-0.4.0.yaml deleted file mode 100755 index 9d93a44a6cd..00000000000 --- a/temp_kubernetes/original/src/samples/postgresql-operator-0.4.0.yaml +++ /dev/null @@ -1,1092 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupSpec defines the desired state of Backup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: BackupStatus defines the observed state of Backup - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.2ndq.io -spec: - conversion: - strategy: Webhook - webhookClientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /convert - group: postgresql.k8s.2ndq.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - 
validation: - openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec defines the desired state of Cluster - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - data: - description: The configuration to be used to backup the data files - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - immediateCheckpoint: - description: Whenever to force the initial checkpoint to be - done as quickly as possible - type: boolean - jobs: - description: The number of jobs to be used to upload the backup, - defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if - this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - backup - type: object - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: Name of the database used by the application. - type: string - owner: - description: Name of the owner of the database in the instance - to be used by applications. - type: string - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? 
- type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - description: Size of the storage. Required if not already specified - in the PVC template. - type: string - storageClass: - description: StorageClass to use for database data (PGDATA). Applied - after evaluating the PVC template, if available. 
If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password, if empty - a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: ClusterStatus defines the observed state of Cluster - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ScheduledBackupSpec defines the desired state of ScheduledBackup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: ScheduledBackupStatus defines the observed state of ScheduledBackup - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-2ndq-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.2ndq.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - 
serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: 
postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.4.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.4.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - imagePullSecrets: - - name: postgresql-operator-pull-secret - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-2ndq-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.2ndq.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters diff --git a/temp_kubernetes/original/src/samples/postgresql-operator-0.5.0.yaml 
b/temp_kubernetes/original/src/samples/postgresql-operator-0.5.0.yaml deleted file mode 100755 index 552cb876750..00000000000 --- a/temp_kubernetes/original/src/samples/postgresql-operator-0.5.0.yaml +++ /dev/null @@ -1,1144 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupSpec defines the desired state of Backup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - type: object - status: - description: BackupStatus defines the observed state of Backup - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - additionalPrinterColumns: - - JSONPath: .metadata.creationTimestamp - name: Age - type: date - - JSONPath: .status.instances - description: Number of instances - name: Instances - type: integer - - JSONPath: .status.readyInstances - description: Number of ready instances - name: Ready - type: integer - - JSONPath: .status.phase - description: Cluster current status - name: Status - type: string - - JSONPath: .status.currentPrimary - description: Primary pod - name: Primary - type: string - conversion: - strategy: Webhook - webhookClientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /convert - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - validation: - 
openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec defines the desired state of Cluster - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. 
- See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that) - type: string - immediateCheckpoint: - description: Whenever to force the initial checkpoint to - be done as quickly as possible - type: boolean - jobs: - description: The number of jobs to be used to upload the - backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. 
Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that) - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - backup - type: object - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: Name of the database used by the application. - type: string - owner: - description: Name of the owner of the database in the instance - to be used by applications. - type: string - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? 
- type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the "postgres" user inside the image, defaults - to "26" - format: int64 - type: integer - postgresUID: - description: The UID of the "postgres" user inside the image, defaults - to "26" - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. 
If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - description: Size of the storage. Required if not already specified - in the PVC template. - type: string - storageClass: - description: StorageClass to use for database data (PGDATA). Applied - after evaluating the PVC template, if available. 
If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password, if empty - a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: ClusterStatus defines the observed state of Cluster - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - type: string - phaseReason: - type: string - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ScheduledBackupSpec defines the desired state of ScheduledBackup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: ScheduledBackupStatus defines the observed state of ScheduledBackup - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - 
resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 
postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.5.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.5.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - imagePullSecrets: - - name: postgresql-operator-pull-secret - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters diff 
--git a/temp_kubernetes/original/src/samples/postgresql-operator-0.6.0.yaml b/temp_kubernetes/original/src/samples/postgresql-operator-0.6.0.yaml deleted file mode 100755 index cec5851ee47..00000000000 --- a/temp_kubernetes/original/src/samples/postgresql-operator-0.6.0.yaml +++ /dev/null @@ -1,1247 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. 
Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - additionalPrinterColumns: - - JSONPath: .metadata.creationTimestamp - name: Age - type: date - - JSONPath: .status.instances - description: Number of instances - name: Instances - type: integer - - JSONPath: .status.readyInstances - description: Number of ready instances - name: Ready - type: integer - - JSONPath: .status.phase - description: Cluster current status - name: Status - type: string - - JSONPath: .status.currentPrimary - description: Primary pod - name: Primary - type: string - conversion: - strategy: Webhook - webhookClientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /convert - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: 
ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - validation: - openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. 
- See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, according - to the bucket default policy. - properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that). Allowed - options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the backup - initial checkpoint will be limited, according to the `checkpoint_completion_target` - setting on the PostgreSQL server. If set to true, an immediate - checkpoint will be used, meaning PostgreSQL will complete - the checkpoint as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to upload - the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed and - may be unencrypted in the object store, according to the bucket - default policy. - properties: - compression: - description: Compress a WAL file before sending it to the - object store. 
Available options are empty string (no compression, - default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that). Allowed - options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the `database` - key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as a - consistent state is reached: in this case that means at the - end of a backup. This option allows to fine tune the recovery - process' - properties: - exclusive: - description: Set the target to be exclusive (defaults to - true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created with - `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" or - a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. 
When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - 
type: string - resources: - description: Resources requirements of every generated Pod. Please refer - to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - description: Size of the storage. Required if not already specified - in the PVC template. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). Applied - after evaluating the PVC template, if available. If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not defined - a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data may - not be up to date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: 
- - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: 
postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.6.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.6.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: 
- - CREATE - - UPDATE - resources: - - clusters diff --git a/temp_kubernetes/original/src/samples/postgresql-operator-0.7.0.yaml b/temp_kubernetes/original/src/samples/postgresql-operator-0.7.0.yaml deleted file mode 100755 index fd222552ecb..00000000000 --- a/temp_kubernetes/original/src/samples/postgresql-operator-0.7.0.yaml +++ /dev/null @@ -1,1262 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: Cg== - service: - name: webhook-service - namespace: system - path: /convert - conversionReviewVersions: - - v1alpha1 - - v1beta1 - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: 
date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. 
- See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. - properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. 
Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. 
When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. - type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - 
type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new - volume and data will be restored to the volume at the same - time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the provisioner - may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - 
resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: 
postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.7.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.7.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: 
- - CREATE - - UPDATE - resources: - - clusters diff --git a/temp_kubernetes/original/src/samples/postgresql-operator-0.8.0.yaml b/temp_kubernetes/original/src/samples/postgresql-operator-0.8.0.yaml deleted file mode 100755 index e2b5ef1ebb1..00000000000 --- a/temp_kubernetes/original/src/samples/postgresql-operator-0.8.0.yaml +++ /dev/null @@ -1,2362 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. 
- type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: false - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: 
.status.lastScheduleTime - name: Last Backup - type: date - name: v1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.lastScheduleTime - name: Last Backup - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: mbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: 
/mutate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: mbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - 
scheduledbackups - sideEffects: None ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - 
verbs: - - create - - get - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - 
control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.8.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.8.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: vbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: 
- service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: vbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None diff --git a/temp_kubernetes/original/src/samples/postgresql-operator-1.0.0.yaml b/temp_kubernetes/original/src/samples/postgresql-operator-1.0.0.yaml deleted file mode 100755 index 7cc13552331..00000000000 --- 
a/temp_kubernetes/original/src/samples/postgresql-operator-1.0.0.yaml +++ /dev/null @@ -1,2362 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: false - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: 
.status.lastScheduleTime - name: Last Backup - type: date - name: v1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.lastScheduleTime - name: Last Backup - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: mbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: 
/mutate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: mbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - 
scheduledbackups - sideEffects: None ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - 
verbs: - - create - - get - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - 
control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:1.0.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:1.0.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: vbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - 
service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: vbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None diff --git a/temp_kubernetes/original/src/samples/scheduled-backup-example.yaml b/temp_kubernetes/original/src/samples/scheduled-backup-example.yaml old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/samples/subscription.yaml 
b/temp_kubernetes/original/src/samples/subscription.yaml old mode 100755 new mode 100644 index 7035da016b7..20ff6874ee6 --- a/temp_kubernetes/original/src/samples/subscription.yaml +++ b/temp_kubernetes/original/src/samples/subscription.yaml @@ -1,10 +1,10 @@ -apiVersion: operators.coreos.com/v1 -kind: Subscription -metadata: - name: cloud-native-postgresql - namespace: openshift-operators -spec: - channel: beta - name: cloud-native-postgresql - source: certified-operators - sourceNamespace: openshift-marketplace +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: cloud-native-postgresql + namespace: openshift-operators +spec: + channel: stable + name: cloud-native-postgresql + source: certified-operators + sourceNamespace: openshift-marketplace diff --git a/temp_kubernetes/original/src/security.md b/temp_kubernetes/original/src/security.md old mode 100755 new mode 100644 index 0858a28efdd..3dce0c06863 --- a/temp_kubernetes/original/src/security.md +++ b/temp_kubernetes/original/src/security.md @@ -1,106 +1,169 @@ -# Security - -This section contains information about security for Cloud Native PostgreSQL, -from a few standpoints: source code, Kubernetes, and PostgreSQL. - -!!! Warning - The information contained in this page must not exonerate you from - performing regular InfoSec duties on your Kubernetes cluster. - -## Source code static analysis - -Source code of Cloud Native PostgreSQL is *systematically scanned* for static analysis purposes, -including **security problems**, using a popular open-source for Go called -[GolangCI-Lint](https://github.com/golangci/golangci-lint) directly in the CI/CD pipeline. -GolangCI-Lint can run several *linters* on the same source code. 
- -One of these is [Golang Security Checker](https://github.com/securego/gosec), or simply `gosec`, -a linter that scans the abstract syntactic tree of the source against a set of rules aimed at -the discovery of well-known vulnerabilities, threats, and weaknesses hidden in -the code such as hard-coded credentials, integer overflows and SQL injections - to name a few. - -!!! Important - A failure in the static code analysis phase of the CI/CD pipeline is a blocker - for the entire delivery of Cloud Native PostgreSQL, meaning that each commit is validated - against all the linters defined by GolangCI-Lint. - -Source code is also regularly inspected through [Coverity Scan by Synopsys](https://scan.coverity.com/) -via EnterpriseDB's internal CI/CD pipeline. - -## Kubernetes - -### Pod Security Policies - -A [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) -is the Kubernetes way to define security rules and specifications that a pod needs to meet -to run in a cluster. -For InfoSec reasons, every Kubernetes platform should implement them. - -Cloud Native PostgreSQL does not require *privileged* mode for containers execution. -PostgreSQL servers run as `postgres` system user. No component whatsoever requires to run as `root`. - -Likewise, Volumes access does not require *privileges* mode or `root` privileges either. -Proper permissions must be properly assigned by the Kubernetes platform and/or administrators. - -### Network Policies - -The pods created by the `Cluster` resource can be controlled by Kubernetes -[network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -to enable/disable inbound and outbound network access at IP and TCP level. - -Network policies are beyond the scope of this document. -Please refer to the ["Network policies"](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -section of the Kubernetes documentation for further information. 
- -### Resources - -In a typical Kubernetes cluster, containers run with unlimited resources. By default, -they might be allowed to use as much CPU and RAM as needed. - -Cloud Native PostgreSQL allows administrators to control and manage resource usage by the pods of the cluster, -through the `resources` section of the manifest, with two knobs: - -- `requests`: initial requirement -- `limits`: maximum usage, in case of dynamic increase of resource needs - -For example, you can request an initial amount of RAM of 32MiB (scalable to 128MiB) and 50m of CPU (scalable to 100m) as follows: - -```yaml - resources: - requests: - memory: "32Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" -``` - -[//]: # ( TODO: we may want to explain what happens to a pod that exceedes the resource limits: CPU -> trottle; MEMORY -> kill ) - -!!! Seealso "Managing Compute Resources for Containers" - For more details on resource management, please refer to the - ["Managing Compute Resources for Containers"](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) - page from the Kubernetes documentation. - -## PostgreSQL - -The current implementation of Cloud Native PostgreSQL automatically creates -passwords and `.pgpass` files for the `postgres` superuser and the database owner. -See the ["Secrets" section in the "Architecture" page](architecture.md#secrets). - -You can use those files to configure application access to the database. - -By default, every replica is automatically configured to connect in **physical -async streaming replication** with the current primary instance, with a special -user called `streaming_replica`. The connection between nodes is **encrypted** -and authentication is via **TLS client certificates**. - -Currently, the operator allows administrators to add `pg_hba.conf` lines directly in the manifest -as part of the `pg_hba` section of the `postgresql` configuration. 
The lines defined in the -manifest are added to a default `pg_hba.conf`. - -For further detail on how `pg_hba.conf` is managed by the operator, see the -["PostgreSQL Configuration" page](postgresql_conf.md#the-pg_hba-section) of the documentation. - -!!! Important - Examples assume that the Kubernetes cluster runs in a private and secure network. +# Security + +This section contains information about security for Cloud Native PostgreSQL, +from a few standpoints: source code, Kubernetes, and PostgreSQL. + +!!! Warning + The information contained in this page must not exonerate you from + performing regular InfoSec duties on your Kubernetes cluster. + +## Source code static analysis + +Source code of Cloud Native PostgreSQL is *systematically scanned* for static analysis purposes, +including **security problems**, using a popular open-source for Go called +[GolangCI-Lint](https://github.com/golangci/golangci-lint) directly in the CI/CD pipeline. +GolangCI-Lint can run several *linters* on the same source code. + +One of these is [Golang Security Checker](https://github.com/securego/gosec), or simply `gosec`, +a linter that scans the abstract syntactic tree of the source against a set of rules aimed at +the discovery of well-known vulnerabilities, threats, and weaknesses hidden in +the code such as hard-coded credentials, integer overflows and SQL injections - to name a few. + +!!! Important + A failure in the static code analysis phase of the CI/CD pipeline is a blocker + for the entire delivery of Cloud Native PostgreSQL, meaning that each commit is validated + against all the linters defined by GolangCI-Lint. + +Source code is also regularly inspected through [Coverity Scan by Synopsys](https://scan.coverity.com/) +via EnterpriseDB's internal CI/CD pipeline. 
+ +## Kubernetes + +### Pod Security Policies + +A [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) +is the Kubernetes way to define security rules and specifications that a pod needs to meet +to run in a cluster. +For InfoSec reasons, every Kubernetes platform should implement them. + +Cloud Native PostgreSQL does not require *privileged* mode for containers execution. +PostgreSQL servers run as `postgres` system user. No component whatsoever requires to run as `root`. + +Likewise, Volumes access does not require *privileges* mode or `root` privileges either. +Proper permissions must be properly assigned by the Kubernetes platform and/or administrators. + +### Network Policies + +The pods created by the `Cluster` resource can be controlled by Kubernetes +[network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) +to enable/disable inbound and outbound network access at IP and TCP level. + +Network policies are beyond the scope of this document. +Please refer to the ["Network policies"](https://kubernetes.io/docs/concepts/services-networking/network-policies/) +section of the Kubernetes documentation for further information. + +### Resources + +In a typical Kubernetes cluster, containers run with unlimited resources. By default, +they might be allowed to use as much CPU and RAM as needed. 
+ +Cloud Native PostgreSQL allows administrators to control and manage resource usage by the pods of the cluster, +through the `resources` section of the manifest, with two knobs: + +- `requests`: initial requirement +- `limits`: maximum usage, in case of dynamic increase of resource needs + +For example, you can request an initial amount of RAM of 32MiB (scalable to 128MiB) and 50m of CPU (scalable to 100m) as follows: + +```yaml + resources: + requests: + memory: "32Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" +``` + +Memory requests and limits are associated with containers, but it is useful to think of a pod as having a memory request +and limit. The memory request for the pod is the sum of the memory requests for all the containers in the pod. + +Pod scheduling is based on requests and not limits. A pod is scheduled to run on a Node only if the Node has enough +available memory to satisfy the pod's memory request. + +For each resource, we divide containers into 3 Quality of Service (QoS) classes, in decreasing order of priority: + +- *Guaranteed* +- *Burstable* +- *Best-Effort* + +For more details, please refer to the ["Configure Quality of Service for Pods"](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#qos-classes) section in the Kubernetes documentation. + +For a PostgreSQL workload it is recommended to set a "Guaranteed" QoS. + +In order to avoid resources related issues in Kubernetes, we can refer to the best practices for "out of resource" handling while creating +a cluster: + +- Specify your required values for memory and CPU in the resources section of the manifest file. + This way you can avoid the `OOM Killed` (where "OOM" stands for Out Of Memory) and `CPU throttle` or any other resources + related issues on running instances. +- In order for the pods of your cluster to get assigned to the "Guaranteed" QoS class, you must set limits and requests + for both memory and CPU to the same value. 
+- Specify your required PostgreSQL memory parameters consistently with the pod resources (like you would do in a VM or physical machine scenario - see below). +- Set up database server pods on a dedicated node using nodeSelector. + See the ["nodeSelector field of the affinityconfiguration resource on the API reference page"](api_reference.md#affinityconfiguration). + +You can refer the following example manifest: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: postgresql-resources +spec: + + instances: 3 + + postgresql: + parameters: + shared_buffers: "256MB" + + resources: + requests: + memory: "1024Mi" + cpu: 1 + limits: + memory: "1024Mi" + cpu: 1 + + storage: + size: 1Gi +``` + +In the above example, we have specified `shared_buffers` parameter with a value of `256MB` - i.e. how much memory is +dedicated to the PostgreSQL server for caching data (the default value for this parameter is `128MB` in case it's not defined). + +A reasonable starting value for `shared_buffers` is 25% of the memory in your system. +For example: if your `shared_buffers` is 256 MB, then the recommended value for your container memory size is 1 GB, +which means that within a pod all the containers will have a total of 1 GB memory that Kubernetes will always preserve, +enabling our containers to work as expected. +For more details, please refer to the ["Resource Consumption"](https://www.postgresql.org/docs/current/runtime-config-resource.html) +section in the PostgreSQL documentation. + +!!! See also "Managing Compute Resources for Containers" + For more details on resource management, please refer to the + ["Managing Compute Resources for Containers"](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) + page from the Kubernetes documentation. 
+ +## PostgreSQL + +The current implementation of Cloud Native PostgreSQL automatically creates +passwords and `.pgpass` files for the `postgres` superuser and the database owner. +See the ["Secrets" section in the "Architecture" page](architecture.md#secrets). + +You can use those files to configure application access to the database. + +By default, every replica is automatically configured to connect in **physical +async streaming replication** with the current primary instance, with a special +user called `streaming_replica`. The connection between nodes is **encrypted** +and authentication is via **TLS client certificates**. + +Currently, the operator allows administrators to add `pg_hba.conf` lines directly in the manifest +as part of the `pg_hba` section of the `postgresql` configuration. The lines defined in the +manifest are added to a default `pg_hba.conf`. + +For further detail on how `pg_hba.conf` is managed by the operator, see the +["PostgreSQL Configuration" page](postgresql_conf.md#the-pg_hba-section) of the documentation. + +!!! Important + Examples assume that the Kubernetes cluster runs in a private and secure network. 
diff --git a/temp_kubernetes/original/src/ssl_connections.md b/temp_kubernetes/original/src/ssl_connections.md old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/storage.md b/temp_kubernetes/original/src/storage.md old mode 100755 new mode 100644 diff --git a/temp_kubernetes/original/src/use_cases.md b/temp_kubernetes/original/src/use_cases.md old mode 100755 new mode 100644 From 770b3279f611767afb5bfee79d5f4447d1da94ce Mon Sep 17 00:00:00 2001 From: George Song Date: Wed, 3 Mar 2021 11:15:05 -0800 Subject: [PATCH 05/28] feat: transpile cnp v1.1.0 docs Former-commit-id: f7b311a27d3f6b04a197f3f3d72cae85c42ae406 --- .../cloud_native_operator/architecture.mdx | 9 + .../before_you_start.mdx | 2 +- .../cloud_native_operator/cnp-plugin.mdx | 144 + .../cloud_native_operator/credits.mdx | 14 +- .../kubernetes/cloud_native_operator/e2e.mdx | 2 +- .../expose_pg_services.mdx | 2 +- .../cloud_native_operator/failure_modes.mdx | 14 +- .../images/apps-in-k8s.png | 0 .../images/apps-outside-k8s.png | 0 .../images/architecture-in-k8s.png | 0 .../images/architecture-r.png | 0 .../images/architecture-rw.png | 0 .../images/network-storage-architecture.png | 0 .../images/operator-capability-level.png | 0 ...cloud-architecture-storage-replication.png | 0 .../images/public-cloud-architecture.png | 0 .../images/shared-nothing-architecture.png | 0 .../cloud_native_operator/index.mdx | 1 + .../cloud_native_operator/installation.mdx | 4 +- .../kubernetes_upgrade.mdx | 2 +- .../operator_capability_levels.mdx | 4 +- .../cloud_native_operator/rolling_update.mdx | 2 +- .../samples/backup-example.yaml | 0 .../samples/cluster-example-custom.yaml | 53 +- .../samples/cluster-example-epas.yaml | 0 .../samples/cluster-example-full.yaml | 208 +- .../samples/cluster-example-initdb.yaml | 0 .../samples/cluster-example-secret.yaml | 0 .../samples/cluster-example-syncreplicas.yaml | 0 .../samples/cluster-example.yaml | 0 .../samples/cluster-expose-service.yaml | 72 +- 
.../samples/cluster-pvc-template.yaml | 0 .../samples/cluster-restore-pitr.yaml | 0 .../samples/cluster-restore.yaml | 0 .../cluster-storage-class-with-backup.yaml | 0 .../samples/cluster-storage-class.yaml | 0 .../samples/postgresql-operator-0.3.0.yaml | 897 ------- .../samples/postgresql-operator-0.4.0.yaml | 1092 -------- .../samples/postgresql-operator-0.5.0.yaml | 1144 -------- .../samples/postgresql-operator-0.6.0.yaml | 1247 --------- .../samples/postgresql-operator-0.7.0.yaml | 1262 --------- .../samples/postgresql-operator-0.8.0.yaml | 2362 ----------------- .../samples/postgresql-operator-1.0.0.yaml | 2362 ----------------- .../samples/scheduled-backup-example.yaml | 0 .../samples/subscription.yaml | 20 +- .../cloud_native_operator/security.mdx | 67 +- 46 files changed, 419 insertions(+), 10567 deletions(-) create mode 100644 advocacy_docs/kubernetes/cloud_native_operator/cnp-plugin.mdx mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/apps-in-k8s.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/apps-outside-k8s.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/architecture-in-k8s.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/architecture-r.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/architecture-rw.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/network-storage-architecture.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/operator-capability-level.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/public-cloud-architecture-storage-replication.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/images/public-cloud-architecture.png mode change 100755 => 100644 
advocacy_docs/kubernetes/cloud_native_operator/images/shared-nothing-architecture.png mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/backup-example.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-custom.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-epas.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-full.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-initdb.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-secret.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-syncreplicas.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-expose-service.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-pvc-template.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-restore-pitr.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-restore.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-storage-class-with-backup.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-storage-class.yaml delete mode 100755 advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.3.0.yaml delete mode 100755 advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.4.0.yaml delete mode 100755 advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.5.0.yaml delete mode 100755 
advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.6.0.yaml delete mode 100755 advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.7.0.yaml delete mode 100755 advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.8.0.yaml delete mode 100755 advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-1.0.0.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/scheduled-backup-example.yaml mode change 100755 => 100644 advocacy_docs/kubernetes/cloud_native_operator/samples/subscription.yaml diff --git a/advocacy_docs/kubernetes/cloud_native_operator/architecture.mdx b/advocacy_docs/kubernetes/cloud_native_operator/architecture.mdx index 26bf91d2dae..3f8c87dd562 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/architecture.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/architecture.mdx @@ -13,6 +13,7 @@ Cloud Native PostgreSQL currently supports clusters based on asynchronous and sy * One primary, with optional multiple hot standby replicas for High Availability * Available services for applications: * `-rw`: applications connect to the only primary instance of the cluster + * `-ro`: applications connect to the only hot standby replicas for read-only-workloads * `-r`: applications connect to any of the instances for read-only workloads * Shared-nothing architecture recommended for better resilience of the PostgreSQL cluster: * PostgreSQL instances should reside on different Kubernetes worker nodes and share only the network @@ -45,12 +46,17 @@ The following diagram shows the architecture: ![Applications reading from any instance in round robin](./images/architecture-r.png) +Applications can also access hot standby replicas through the `-ro` service made available +by the operator. This service enables the application to offload read-only queries from the +primary node. 
+ ## Application deployments Applications are supposed to work with the services created by Cloud Native PostgreSQL in the same Kubernetes cluster: * `[cluster name]-rw` +* `[cluster name]-ro` * `[cluster name]-r` Those services are entirely managed by the Kubernetes cluster and @@ -97,6 +103,9 @@ you can use the following environment variables in your applications: * `PG_DATABASE_R_SERVICE_HOST`: the IP address of the service pointing to all the PostgreSQL instances for read-only workloads +* `PG_DATABASE_RO_SERVICE_HOST`: the IP address of the + service pointing to all hot-standby replicas of the cluster + * `PG_DATABASE_RW_SERVICE_HOST`: the IP address of the service pointing to the *primary* instance of the cluster diff --git a/advocacy_docs/kubernetes/cloud_native_operator/before_you_start.mdx b/advocacy_docs/kubernetes/cloud_native_operator/before_you_start.mdx index 1d78a46bef2..6b9d5f3a4f9 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/before_you_start.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/before_you_start.mdx @@ -11,7 +11,7 @@ specific to Kubernetes and PostgreSQL. 
| Resource | Description | |-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) | A *node* is a worker machine in Kubernetes, either virtual or physical, where all services necessary to run pods are managed by the master(s). | +| [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) | A *node* is a worker machine in Kubernetes, either virtual or physical, where all services necessary to run pods are managed by the control plane node(s). | | [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) | A *pod* is the smallest computing unit that can be deployed in a Kubernetes cluster and is composed of one or more containers that share network and storage. | | [Service](https://kubernetes.io/docs/concepts/services-networking/service/) | A *service* is an abstraction that exposes as a network service an application that runs on a group of pods and standardizes important features such as service discovery across applications, load balancing, failover, and so on. | | [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) | A *secret* is an object that is designed to store small amounts of sensitive data such as passwords, access keys, or tokens, and use them in pods. 
| diff --git a/advocacy_docs/kubernetes/cloud_native_operator/cnp-plugin.mdx b/advocacy_docs/kubernetes/cloud_native_operator/cnp-plugin.mdx new file mode 100644 index 00000000000..a111620e663 --- /dev/null +++ b/advocacy_docs/kubernetes/cloud_native_operator/cnp-plugin.mdx @@ -0,0 +1,144 @@ +--- +title: 'Cloud Native PostgreSQL Plugin' +originalFilePath: 'src/cnp-plugin.md' +product: 'Cloud Native Operator' +--- + +Cloud Native PostgreSQL provides a plugin for `kubectl` to manage a cluster in Kubernetes. +The plugin also works with `oc` in an OpenShift environment. + +## Install + +You can install the plugin in your system with: + +```sh +curl -sSfL \ + https://github.com/EnterpriseDB/kubectl-cnp/raw/main/install.sh | \ + sudo sh -s -- -b /usr/local/bin +``` + +## Use + +Once the plugin is installed and deployed, you can start using it like this: + +```shell +kubectl cnp +``` + +### Status + +The `status` command provides a brief overview of the current status of your cluster. + +```shell +kubectl cnp status cluster-example +``` + +```shell +Cluster in healthy state +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13 +Primary instance: cluster-example-1 +Instances: 3 +Ready instances: 3 + +Instances status +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- +cluster-example-1 0/6000060 6927251808674721812 ✓ ✗ ✗ ✗ +cluster-example-2 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ +cluster-example-3 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ + +``` + +You can also get a more verbose version of the status by adding `--verbose` or just `-v`. + +```shell +kubectl cnp status cluster-example --verbose +``` + +```shell +Cluster in healthy state +Name: cluster-example +Namespace: default +PostgreSQL Image: quay.io/enterprisedb/postgresql:13 +Primary instance: 
cluster-example-1 +Instances: 3 +Ready instances: 3 + +PostgreSQL Configuration +archive_command = '/controller/manager wal-archive %p' +archive_mode = 'on' +archive_timeout = '5min' +full_page_writes = 'on' +hot_standby = 'true' +listen_addresses = '*' +logging_collector = 'off' +max_parallel_workers = '32' +max_replication_slots = '32' +max_worker_processes = '32' +port = '5432' +ssl = 'on' +ssl_ca_file = '/tmp/ca.crt' +ssl_cert_file = '/tmp/server.crt' +ssl_key_file = '/tmp/server.key' +unix_socket_directories = '/var/run/postgresql' +wal_keep_size = '512MB' +wal_level = 'logical' +wal_log_hints = 'on' + + +PostgreSQL HBA Rules +# Grant local access +local all all peer + +# Require client certificate authentication for the streaming_replica user +hostssl postgres streaming_replica all cert clientcert=1 +hostssl replication streaming_replica all cert clientcert=1 + +# Otherwise use md5 authentication +host all all all md5 + + +Instances status +Pod name Current LSN Received LSN Replay LSN System ID Primary Replicating Replay paused Pending restart +-------- ----------- ------------ ---------- --------- ------- ----------- ------------- --------------- +cluster-example-1 0/6000060 6927251808674721812 ✓ ✗ ✗ ✗ +cluster-example-2 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ +cluster-example-3 0/6000060 0/6000060 6927251808674721812 ✗ ✓ ✗ ✗ +``` + +The command also supports output in `yaml` and `json` format. + +### Promote + +The meaning of this command is to `promote` a pod in the cluster to primary, so you +can start with maintenance work or test a switch-over situation in your cluster + +```shell +kubectl cnp promote cluster-example cluster-example-2 +``` + +### Certificates + +Clusters created using the Cloud Native PostgreSQL operator work with a CA to sign +a TLS authentication certificate. 
+ +To get a certificate, you need to provide a name for the secret to store +the credentials, the cluster name, and a user for this certificate: + +```shell +kubectl cnp certificate cluster-cert --cnp-cluster cluster-example --cnp-user appuser +``` + +After the secret is created, you can get it using `kubectl`: + +```shell +kubectl get secret cluster-cert +``` + +You can view its content in plain text using the following command: + +```shell +kubectl get secret cluster-cert -o json | jq -r '.data | map(@base64d) | .[]' +``` diff --git a/advocacy_docs/kubernetes/cloud_native_operator/credits.mdx b/advocacy_docs/kubernetes/cloud_native_operator/credits.mdx index 99c84eb872b..2597cd81bb5 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/credits.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/credits.mdx @@ -7,13 +7,15 @@ product: 'Cloud Native Operator' Cloud Native PostgreSQL (Operator for Kubernetes/OpenShift) has been designed, developed, and tested by the EnterpriseDB Cloud Native team: -- Leonardo Cecchi -- Marco Nenciarini -- Jonathan Gonzalez -- Francesco Canovai +- Gabriele Bartolini - Jonathan Battiato +- Francesco Canovai +- Leonardo Cecchi +- Valerio Del Sarto - Niccolò Fei -- Devin Nemec +- Jonathan Gonzalez +- Danish Khan +- Marco Nenciarini +- Jitendra Wadle - Adam Wright -- Gabriele Bartolini diff --git a/advocacy_docs/kubernetes/cloud_native_operator/e2e.mdx b/advocacy_docs/kubernetes/cloud_native_operator/e2e.mdx index 5fa15fb98c9..668c943cef4 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/e2e.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/e2e.mdx @@ -32,7 +32,7 @@ and the following suite of E2E tests are performed on that cluster: * Installation of the operator; * Creation of a `Cluster`; * Usage of a persistent volume for data storage; -* Connection via services; +* Connection via services, including read-only; * Scale-up of a `Cluster`; * Scale-down of a `Cluster`; * Failover; diff --git 
a/advocacy_docs/kubernetes/cloud_native_operator/expose_pg_services.mdx b/advocacy_docs/kubernetes/cloud_native_operator/expose_pg_services.mdx index c8a90f4f27e..bca1c0e31ce 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/expose_pg_services.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/expose_pg_services.mdx @@ -8,7 +8,7 @@ This section explains how to expose a PostgreSQL service externally, allowing ac to your PostgreSQL database **from outside your Kubernetes cluster** using NGINX Ingress Controller. -If you followed the [QuickStart](/quickstart), you should have by now +If you followed the [QuickStart](./quickstart.md), you should have by now a database that can be accessed inside the cluster via the `cluster-example-rw` (primary) and `cluster-example-r` (read-only) services in the `default` namespace. Both services use port `5432`. diff --git a/advocacy_docs/kubernetes/cloud_native_operator/failure_modes.mdx b/advocacy_docs/kubernetes/cloud_native_operator/failure_modes.mdx index bf223a1c1cc..38c30061989 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/failure_modes.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/failure_modes.mdx @@ -131,25 +131,21 @@ Self-healing will happen after `tolerationSeconds`. ## Self-healing -If the failed pod is a standby, the pod is removed from the `-r` service. +If the failed pod is a standby, the pod is removed from the `-r` service +and from the `-ro` service. The pod is then restarted using its PVC if available; otherwise, a new pod will be created from a backup of the current primary. The pod -will be added again to the `-r` service when ready. +will be added again to the `-r` service and to the `-ro` service when ready. If the failed pod is the primary, the operator will promote the active pod with status ready and the lowest replication lag, then point the `-rw`service -to it. The failed pod will be removed from the `-r` service. +to it. 
The failed pod will be removed from the `-r` service and from the +`-ro` service. Other standbys will start replicating from the new primary. The former primary will use `pg_rewind` to synchronize itself with the new one if its PVC is available; otherwise, a new standby will be created from a backup of the current primary. -!!! Important - Due to a [bug in PostgreSQL 13 streaming replication](https://www.postgresql.org/message-id/flat/20201209.174314.282492377848029776.horikyota.ntt%40gmail.com) - it is not guaranteed that an existing standby is able to follow a promoted - primary, even if the new primary contains all the required WALs. Standbys - will be able to follow a primary if WAL archiving is configured. - ## Manual intervention In the case of undocumented failure, it might be necessary to intervene diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/apps-in-k8s.png b/advocacy_docs/kubernetes/cloud_native_operator/images/apps-in-k8s.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/apps-outside-k8s.png b/advocacy_docs/kubernetes/cloud_native_operator/images/apps-outside-k8s.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/architecture-in-k8s.png b/advocacy_docs/kubernetes/cloud_native_operator/images/architecture-in-k8s.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/architecture-r.png b/advocacy_docs/kubernetes/cloud_native_operator/images/architecture-r.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/architecture-rw.png b/advocacy_docs/kubernetes/cloud_native_operator/images/architecture-rw.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/network-storage-architecture.png b/advocacy_docs/kubernetes/cloud_native_operator/images/network-storage-architecture.png old mode 100755 
new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/operator-capability-level.png b/advocacy_docs/kubernetes/cloud_native_operator/images/operator-capability-level.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/public-cloud-architecture-storage-replication.png b/advocacy_docs/kubernetes/cloud_native_operator/images/public-cloud-architecture-storage-replication.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/public-cloud-architecture.png b/advocacy_docs/kubernetes/cloud_native_operator/images/public-cloud-architecture.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/images/shared-nothing-architecture.png b/advocacy_docs/kubernetes/cloud_native_operator/images/shared-nothing-architecture.png old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/index.mdx b/advocacy_docs/kubernetes/cloud_native_operator/index.mdx index 560fdd4dc03..0c089b8df5f 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/index.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/index.mdx @@ -28,6 +28,7 @@ navigation: - ssl_connections - kubernetes_upgrade - e2e + - cnp-plugin - license_keys - container_images - operator_capability_levels diff --git a/advocacy_docs/kubernetes/cloud_native_operator/installation.mdx b/advocacy_docs/kubernetes/cloud_native_operator/installation.mdx index 3da27118e4a..3c77f19faad 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/installation.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/installation.mdx @@ -9,12 +9,12 @@ product: 'Cloud Native Operator' The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. 
-You can install the [latest operator manifest](../samples/postgresql-operator-1.0.0.yaml) +You can install the [latest operator manifest](https://get.enterprisedb.io/cnp/postgresql-operator-1.1.0.yaml) as follows: ```sh kubectl apply -f \ - https://docs.enterprisedb.io/cloud-native-postgresql/latest/samples/postgresql-operator-1.0.0.yaml + https://get.enterprisedb.io/cnp/postgresql-operator-1.1.0.yaml ``` Once you have run the `kubectl` command, Cloud Native PostgreSQL will be installed in your Kubernetes cluster. diff --git a/advocacy_docs/kubernetes/cloud_native_operator/kubernetes_upgrade.mdx b/advocacy_docs/kubernetes/cloud_native_operator/kubernetes_upgrade.mdx index cc3c7d0f2f2..5f54cc93609 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/kubernetes_upgrade.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/kubernetes_upgrade.mdx @@ -80,7 +80,7 @@ When **disabled**, Kubernetes forces the recreation of the Pod on a different node with a new PVC by relying on PostgreSQL's physical streaming replication, then destroys the old PVC together with the Pod. This scenario is generally -not recommended unless the database's size is small, and recloning +not recommended unless the database's size is small, and re-cloning the new PostgreSQL instance takes shorter than waiting. !!! Note diff --git a/advocacy_docs/kubernetes/cloud_native_operator/operator_capability_levels.mdx b/advocacy_docs/kubernetes/cloud_native_operator/operator_capability_levels.mdx index 181c9944f5a..79635f67015 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/operator_capability_levels.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/operator_capability_levels.mdx @@ -70,7 +70,7 @@ PostgreSQL instance and to reconcile the pod status with the instance itself based on the PostgreSQL cluster topology. The instance manager also starts a web server that is invoked by the `kubelet` for probes. 
Unix signals invoked by the `kubelet` are filtered by the instance manager and, where appropriate, -forwarded to the `postmaster` process for fast and controlled reactions to +forwarded to the `postgres` process for fast and controlled reactions to external events. The instance manager is written in Go and has no external dependencies. @@ -374,7 +374,7 @@ for PostgreSQL have been implemented. ### Kubernetes events Record major events as expected by the Kubernetes API, such as creating resources, -removing nodes, upgrading, and so on. Events can be displayed throught +removing nodes, upgrading, and so on. Events can be displayed through the `kubectl describe` and `kubectl get events` command. ## Level 5 - Auto Pilot diff --git a/advocacy_docs/kubernetes/cloud_native_operator/rolling_update.mdx b/advocacy_docs/kubernetes/cloud_native_operator/rolling_update.mdx index 2c925eac978..8bef6cedd15 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/rolling_update.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/rolling_update.mdx @@ -39,7 +39,7 @@ managed by the `primaryUpdateStrategy` option, accepting these two values: The default and recommended value is `switchover`. The upgrade keeps the Cloud Native PostgreSQL identity and does not -reclone the data. Pods will be deleted and created again with the same PVCs. +re-clone the data. Pods will be deleted and created again with the same PVCs. 
During the rolling update procedure, the services endpoints move to reflect the cluster's status, so the applications ignore the node that diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/backup-example.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/backup-example.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-custom.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-custom.yaml old mode 100755 new mode 100644 index 1878cf7e402..49223affa68 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-custom.yaml +++ b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-custom.yaml @@ -1,25 +1,28 @@ -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-custom -spec: - instances: 3 - - # Parameters and pg_hba configuration will be append - # to the default ones to make the cluster work - postgresql: - parameters: - max_worker_processes: "60" - pg_hba: - - host all all all trust - - # Example of rolling update strategy: - # - unsupervised: automated update of the primary once all - # replicas have been upgraded (default) - # - supervised: requires manual supervision to perform - # the switchover of the primary - primaryUpdateStrategy: unsupervised - - # Require 1Gi of space per instance using default storage class - storage: - size: 1Gi +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-custom +spec: + instances: 3 + + # Parameters and pg_hba configuration will be append + # to the default ones to make the cluster work + postgresql: + parameters: + max_worker_processes: "60" + pg_hba: + # To access through TCP/IP you will need to get username + # and password from the secret cluster-example-custom-app + - host all all all md5 + + + # Example of rolling update strategy: + # - unsupervised: automated update of the 
primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # the switchover of the primary + primaryUpdateStrategy: unsupervised + + # Require 1Gi of space per instance using default storage class + storage: + size: 1Gi diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-epas.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-epas.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-full.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-full.yaml old mode 100755 new mode 100644 index bd644fa0f9f..71e497e2baf --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-full.yaml +++ b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-full.yaml @@ -1,104 +1,104 @@ -# Example of definition of a test cluster using all the elements available -# in the CRD. Please change values appropriately for your environment. -# Remember that you can take advantage of convention over configuration -# and normally you don't need to use all these definitions. 
- -apiVersion: v1 -data: - password: cGFzc3dvcmQ= -kind: Secret -metadata: - name: cluster-example-app-user -type: kubernetes.io/basic-auth ---- -apiVersion: v1 -data: - password: cGFzc3dvcmQ= -kind: Secret -metadata: - name: cluster-example-superuser -type: kubernetes.io/basic-auth ---- -apiVersion: v1 -kind: Secret -metadata: - name: backup-creds -data: - ACCESS_KEY_ID: a2V5X2lk - ACCESS_SECRET_KEY: c2VjcmV0X2tleQ== ---- -apiVersion: postgresql.k8s.enterprisedb.io/v1 -kind: Cluster -metadata: - name: cluster-example-full -spec: - description: "Example of cluster" - imageName: quay.io/enterprisedb/postgresql:13.1 - # imagePullSecret is only required if the images are located in a private registry - # imagePullSecrets: - # - name: private_registry_access - instances: 3 - #licenseKey: insert_valid_license_here - startDelay: 300 - stopDelay: 300 - primaryUpdateStrategy: unsupervised - - postgresql: - parameters: - shared_buffers: 256MB - pg_hba: - - host all all 10.244.0.0/16 md5 - - bootstrap: - initdb: - database: appdb - owner: appuser - secret: - name: cluster-example-app-user - # Alternative bootstrap method: start from a backup - #recovery: - # backup: - # name: backup-example - - superuserSecret: - name: cluster-example-superuser - - storage: - storageClass: standard - size: 1Gi - - backup: - barmanObjectStore: - destinationPath: s3://cluster-example-full-backup/ - endpointURL: http://custom-endpoint:1234 - s3Credentials: - accessKeyId: - name: backup-creds - key: ACCESS_KEY_ID - secretAccessKey: - name: backup-creds - key: ACCESS_SECRET_KEY - wal: - compression: gzip - encryption: AES256 - data: - compression: gzip - encryption: AES256 - immediateCheckpoint: false - jobs: 2 - - resources: - requests: - memory: "512Mi" - cpu: "1" - limits: - memory: "1Gi" - cpu: "2" - - affinity: - enablePodAntiAffinity: true - topologyKey: failure-domain.beta.kubernetes.io/zone - - nodeMaintenanceWindow: - inProgress: false - reusePVC: false +# Example of definition of a test 
cluster using all the elements available +# in the CRD. Please change values appropriately for your environment. +# Remember that you can take advantage of convention over configuration +# and normally you don't need to use all these definitions. + +apiVersion: v1 +data: + password: cGFzc3dvcmQ= +kind: Secret +metadata: + name: cluster-example-app-user +type: kubernetes.io/basic-auth +--- +apiVersion: v1 +data: + password: cGFzc3dvcmQ= +kind: Secret +metadata: + name: cluster-example-superuser +type: kubernetes.io/basic-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: backup-creds +data: + ACCESS_KEY_ID: a2V5X2lk + ACCESS_SECRET_KEY: c2VjcmV0X2tleQ== +--- +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: cluster-example-full +spec: + description: "Example of cluster" + imageName: quay.io/enterprisedb/postgresql:13.2 + # imagePullSecret is only required if the images are located in a private registry + # imagePullSecrets: + # - name: private_registry_access + instances: 3 + #licenseKey: insert_valid_license_here + startDelay: 300 + stopDelay: 300 + primaryUpdateStrategy: unsupervised + + postgresql: + parameters: + shared_buffers: 256MB + pg_hba: + - host all all 10.244.0.0/16 md5 + + bootstrap: + initdb: + database: appdb + owner: appuser + secret: + name: cluster-example-app-user + # Alternative bootstrap method: start from a backup + #recovery: + # backup: + # name: backup-example + + superuserSecret: + name: cluster-example-superuser + + storage: + storageClass: standard + size: 1Gi + + backup: + barmanObjectStore: + destinationPath: s3://cluster-example-full-backup/ + endpointURL: http://custom-endpoint:1234 + s3Credentials: + accessKeyId: + name: backup-creds + key: ACCESS_KEY_ID + secretAccessKey: + name: backup-creds + key: ACCESS_SECRET_KEY + wal: + compression: gzip + encryption: AES256 + data: + compression: gzip + encryption: AES256 + immediateCheckpoint: false + jobs: 2 + + resources: + requests: + memory: 
"512Mi" + cpu: "1" + limits: + memory: "1Gi" + cpu: "2" + + affinity: + enablePodAntiAffinity: true + topologyKey: failure-domain.beta.kubernetes.io/zone + + nodeMaintenanceWindow: + inProgress: false + reusePVC: false diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-initdb.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-initdb.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-secret.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-secret.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-syncreplicas.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example-syncreplicas.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-example.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-expose-service.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-expose-service.yaml old mode 100755 new mode 100644 index b9aa761bb3c..201b25e1643 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-expose-service.yaml +++ b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-expose-service.yaml @@ -1,36 +1,36 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: tcp-services - namespace: ingress-nginx -data: - 5432: default/cluster-example-lead-primary:5432 - ---- -apiVersion: v1 -kind: Service -metadata: - name: ingress-nginx - namespace: ingress-nginx - labels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx -spec: - type: LoadBalancer - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - 
protocol: TCP - - name: postgres - port: 5432 - targetPort: 5432 - protocol: TCP - selector: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tcp-services + namespace: ingress-nginx +data: + 5432: default/cluster-example-rw:5432 + +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + type: LoadBalancer + ports: + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP + - name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-pvc-template.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-pvc-template.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-restore-pitr.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-restore-pitr.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-restore.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-restore.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-storage-class-with-backup.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-storage-class-with-backup.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-storage-class.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/cluster-storage-class.yaml old mode 100755 new mode 100644 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.3.0.yaml 
b/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.3.0.yaml deleted file mode 100755 index 3183d014ed0..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.3.0.yaml +++ /dev/null @@ -1,897 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupSpec defines the desired state of Backup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - type: object - status: - description: BackupStatus defines the observed state of Backup - properties: - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - scope: Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - validation: - openAPIV3Schema: - description: Cluster is the Schema for the postgresqls API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec defines the desired state of Cluster - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - applicationConfiguration: - description: Configuration from the application point of view - properties: - database: - description: Name of the database used by the application - minLength: 1 - type: string - owner: - description: Name of the owner of the database in the instance to - be used by applications. - minLength: 1 - type: string - required: - - database - - owner - type: object - backup: - description: The configuration to be used for backups - properties: - data: - description: The configuration to be used to backup the data files - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - immediateCheckpoint: - description: Whenever to force the initial checkpoint to be - done as quickly as possible - type: boolean - jobs: - description: The number of jobs to be used to upload the backup, - defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if - this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? 
- type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - anyOf: - - type: integer - - type: string - description: Size of the storage. Required if not already specified - in the PVC template. - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - storageClass: - description: StorageClass to use for database data (PGDATA). Applied - after evaluating the PVC template, if available. 
If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - required: - - applicationConfiguration - - instances - type: object - status: - description: ClusterStatus defines the observed state of Cluster - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this bdrGroup and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ScheduledBackupSpec defines the desired state of ScheduledBackup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: ScheduledBackupStatus defines the observed state of ScheduledBackup - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups/status - verbs: - - get - - patch - 
- update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.3.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.3.0 - name: manager - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - 
imagePullSecrets: - - name: postgresql-operator-pull-secret - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.4.0.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.4.0.yaml deleted file mode 100755 index 9d93a44a6cd..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.4.0.yaml +++ /dev/null @@ -1,1092 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupSpec defines the desired state of Backup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: BackupStatus defines the observed state of Backup - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.2ndq.io -spec: - conversion: - strategy: Webhook - webhookClientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /convert - group: postgresql.k8s.2ndq.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - 
validation: - openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec defines the desired state of Cluster - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - data: - description: The configuration to be used to backup the data files - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - immediateCheckpoint: - description: Whenever to force the initial checkpoint to be - done as quickly as possible - type: boolean - jobs: - description: The number of jobs to be used to upload the backup, - defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if - this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if the - bucket is not already configured for that) - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - backup - type: object - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: Name of the database used by the application. - type: string - owner: - description: Name of the owner of the database in the instance - to be used by applications. - type: string - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? 
- type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - description: Size of the storage. Required if not already specified - in the PVC template. - type: string - storageClass: - description: StorageClass to use for database data (PGDATA). Applied - after evaluating the PVC template, if available. 
If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password, if empty - a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: ClusterStatus defines the observed state of Cluster - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.2ndq.io -spec: - group: postgresql.k8s.2ndq.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ScheduledBackupSpec defines the desired state of ScheduledBackup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: ScheduledBackupStatus defines the observed state of ScheduledBackup - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-2ndq-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.2ndq.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - 
serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.2ndq.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: 
postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.4.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.4.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - imagePullSecrets: - - name: postgresql-operator-pull-secret - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-2ndq-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.2ndq.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters diff --git 
a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.5.0.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.5.0.yaml deleted file mode 100755 index 552cb876750..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.5.0.yaml +++ /dev/null @@ -1,1144 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BackupSpec defines the desired state of Backup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: BackupStatus defines the observed state of Backup - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - additionalPrinterColumns: - - JSONPath: .metadata.creationTimestamp - name: Age - type: date - - JSONPath: .status.instances - description: Number of instances - name: Instances - type: integer - - JSONPath: .status.readyInstances - description: Number of ready instances - name: Ready - type: integer - - JSONPath: .status.phase - description: Cluster current status - name: Status - type: string - - JSONPath: .status.currentPrimary - description: Primary pod - name: Primary - type: string - conversion: - strategy: Webhook - webhookClientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /convert - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: 
Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - validation: - openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ClusterSpec defines the desired state of Cluster - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. 
- See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that) - type: string - immediateCheckpoint: - description: Whenever to force the initial checkpoint to - be done as quickly as possible - type: boolean - jobs: - description: The number of jobs to be used to upload the - backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. 
Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream - properties: - compression: - description: Whenever to compress files or not - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that) - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - backup - type: object - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: Name of the database used by the application. - type: string - owner: - description: Name of the owner of the database in the instance - to be used by applications. - type: string - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? 
- type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the "postgres" user inside the image, defaults - to "26" - format: int64 - type: integer - postgresUID: - description: The UID of the "postgres" user inside the image, defaults - to "26" - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. 
If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - description: Size of the storage. Required if not already specified - in the PVC template. - type: string - storageClass: - description: StorageClass to use for database data (PGDATA). Applied - after evaluating the PVC template, if available. 
If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password, if empty - a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: ClusterStatus defines the observed state of Cluster - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - type: string - phaseReason: - type: string - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ScheduledBackupSpec defines the desired state of ScheduledBackup - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: ScheduledBackupStatus defines the observed state of ScheduledBackup - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - 
resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 
postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.5.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.5.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - imagePullSecrets: - - name: postgresql-operator-pull-secret - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters diff 
--git a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.6.0.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.6.0.yaml deleted file mode 100755 index cec5851ee47..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.6.0.yaml +++ /dev/null @@ -1,1247 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - additionalPrinterColumns: - - JSONPath: .metadata.creationTimestamp - name: Age - type: date - - JSONPath: .status.instances - description: Number of instances - name: Instances - type: integer - - JSONPath: .status.readyInstances - description: Number of ready instances - name: Ready - type: integer - - JSONPath: .status.phase - description: Cluster current status - name: Status - type: string - - JSONPath: .status.currentPrimary - description: Primary pod - 
name: Primary - type: string - conversion: - strategy: Webhook - webhookClientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /convert - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - validation: - openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. 
- See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, according - to the bucket default policy. - properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that). Allowed - options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the backup - initial checkpoint will be limited, according to the `checkpoint_completion_target` - setting on the PostgreSQL server. If set to true, an immediate - checkpoint will be used, meaning PostgreSQL will complete - the checkpoint as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to upload - the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed and - may be unencrypted in the object store, according to the bucket - default policy. - properties: - compression: - description: Compress a WAL file before sending it to the - object store. 
Available options are empty string (no compression, - default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files (if - the bucket is not already configured for that). Allowed - options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the `database` - key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as a - consistent state is reached: in this case that means at the - end of a backup. This option allows to fine tune the recovery - process' - properties: - exclusive: - description: Set the target to be exclusive (defaults to - true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created with - `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" or - a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to let - you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. 
When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, please - refer to the license agreement that comes with the operator. - type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come up - again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines to - be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - 
type: string - resources: - description: Resources requirements of every generated Pod. Please refer - to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources - required. If Requests is omitted for a container, it defaults - to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL instance - node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new volume - and data will be restored to the volume at the same time. - If the provisioner does not support VolumeSnapshot data source, - volume will not be created and the failure will be reported - as an event. In the future, we plan to support more data source - types and the behavior of the provisioner may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - size: - description: Size of the storage. Required if not already specified - in the PVC template. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). Applied - after evaluating the PVC template, if available. If not specified, - generated PVCs will be satisfied by the default storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not defined - a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data may - not be up to date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on the - cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can be - used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.3.0 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: 
- - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: 
postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.6.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.6.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: 
- - CREATE - - UPDATE - resources: - - clusters diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.7.0.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.7.0.yaml deleted file mode 100755 index fd222552ecb..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.7.0.yaml +++ /dev/null @@ -1,1262 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - caBundle: Cg== - service: - name: webhook-service - namespace: system - path: /convert - conversionReviewVersions: - - v1alpha1 - - v1beta1 - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: 
date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the postgresql API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Should we enable anti affinity or not? - type: boolean - topologyKey: - description: TopologyKey to use for anti-affinity configuration. 
- See k8s documentation for more info on that - type: string - required: - - enablePodAntiAffinity - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. - properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. 
Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. 
When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. - type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - 
type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: This field requires the VolumeSnapshotDataSource - alpha feature gate to be enabled and currently VolumeSnapshot - is the only supported data source. If the provisioner can - support VolumeSnapshot data source, it will create a new - volume and data will be restored to the volume at the same - time. If the provisioner does not support VolumeSnapshot - data source, volume will not be created and the failure - will be reported as an event. In the future, we plan to - support more data source types and the behavior of the provisioner - may change. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. This is a beta feature. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. 
- format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - 
resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: 
postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.7.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.7.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- clientConfig: - caBundle: Cg== - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: 
- - CREATE - - UPDATE - resources: - - clusters diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.8.0.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.8.0.yaml deleted file mode 100755 index e2b5ef1ebb1..00000000000 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-0.8.0.yaml +++ /dev/null @@ -1,2362 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. 
- type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: false - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: 
.status.lastScheduleTime - name: Last Backup - type: date - name: v1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.lastScheduleTime - name: Last Backup - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: mbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: 
/mutate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: mbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - 
scheduledbackups - sideEffects: None ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - 
verbs: - - create - - get - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - 
control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:v0.8.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:v0.8.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: vbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: 
- service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: vbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-1.0.0.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-1.0.0.yaml deleted file mode 100755 index 7cc13552331..00000000000 --- 
a/advocacy_docs/kubernetes/cloud_native_operator/samples/postgresql-operator-1.0.0.yaml +++ /dev/null @@ -1,2362 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: postgresql-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: backups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup is the Schema for the backups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the backup. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - status: - description: 'Most recently observed status of the backup. This data may - not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - backupId: - description: The ID of the Barman backup - type: string - commandError: - description: The backup command output - type: string - commandOutput: - description: The backup command output - type: string - destinationPath: - description: The path where to store the backup (i.e. 
s3://bucket/path/to/folder) - this path, with different destination folders, will be used for - WALs and for data - type: string - encryption: - description: Encryption method required to S3 API - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, overriding - the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - phase: - description: The last backup status - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must be - a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must be - defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used if this - parameter is omitted - type: string - startedAt: - description: When the backup was started - format: date-time - type: string - stoppedAt: - description: When the backup was terminated - format: date-time - type: string - required: - - destinationPath - - s3Credentials - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: clusters.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the cluster. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - enablePodAntiAffinity: - description: Activates anti-affinity for the pods. The operator - will define pods anti-affinity unless this field is explicitly - set to false - type: boolean - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector is map of key-value pairs used to define - the nodes on which the pods can run. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' - type: object - topologyKey: - description: TopologyKey to use for anti-affinity configuration. - See k8s documentation for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - data: - description: The configuration to be used to backup the data - files When not defined, base backups files will be stored - uncompressed and may be unencrypted in the object store, - according to the bucket default policy. 
- properties: - compression: - description: Compress a backup file (a tar file per tablespace) - while streaming it to the object store. Available options - are empty string (no compression, default), `gzip` or - `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - immediateCheckpoint: - description: Control whether the I/O workload for the - backup initial checkpoint will be limited, according - to the `checkpoint_completion_target` setting on the - PostgreSQL server. If set to true, an immediate checkpoint - will be used, meaning PostgreSQL will complete the checkpoint - as soon as possible. `false` by default. - type: boolean - jobs: - description: The number of parallel jobs to be used to - upload the backup, defaults to 2 - format: int32 - type: integer - type: object - destinationPath: - description: The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used - for WALs and for data - minLength: 1 - type: string - endpointURL: - description: Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - required: - - accessKeyId - - secretAccessKey - type: object - serverName: - description: The server name on S3, the cluster name is used - if this parameter is omitted - type: string - wal: - description: The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed - and may be unencrypted in the object store, according to - the bucket default policy. - properties: - compression: - description: Compress a WAL file before sending it to - the object store. Available options are empty string - (no compression, default), `gzip` or `bzip2`. - type: string - encryption: - description: Whenever to force the encryption of files - (if the bucket is not already configured for that). - Allowed options are empty string (use the bucket policy, - default), `AES256` and `aws:kms` - type: string - type: object - required: - - destinationPath - - s3Credentials - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' 
- type: string - options: - description: The list of options that must be passed to initdb - when creating the cluster - items: - type: string - type: array - owner: - description: Name of the owner of the database in the instance - to be used by applications. Defaults to the value of the - `database` key. - type: string - redwood: - description: If we need to enable/disable Redwood compatibility. - Requires EPAS and for EPAS defaults to true - type: boolean - secret: - description: Name of the secret containing the initial credentials - for the owner of the user database. If empty a new secret - will be created from scratch - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: The backup we need to restore - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - recoveryTarget: - description: 'By default the recovery will end as soon as - a consistent state is reached: in this case that means at - the end of a backup. 
This option allows to fine tune the - recovery process' - properties: - exclusive: - description: Set the target to be exclusive (defaults - to true) - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest", "current" - or a positive integer) - type: string - targetTime: - description: The target time, in any unambiguous representation - allowed by PostgreSQL - type: string - targetXID: - description: The target transaction ID - type: string - type: object - required: - - backup - type: object - type: object - description: - description: Description of this PostgreSQL cluster - type: string - imageName: - description: Name of the container image - minLength: 0 - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images. - If the license key contains a pull secret that secret will be automatically - included. - items: - description: LocalObjectReference contains enough information to - let you locate the referenced object inside the same namespace. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - type: array - instances: - description: Number of instances required in the cluster - format: int32 - minimum: 1 - type: integer - licenseKey: - description: The license key of the cluster. When empty, the cluster - operates in trial mode and after the expiry date (default 30 days) - the operator will cease any reconciliation attempt. For details, - please refer to the license agreement that comes with the operator. 
- type: string - maxSyncReplicas: - description: The target value for the synchronous replication quorum, - that can be decreased if the number of ready standbys is lower than - this. Undefined or 0 disable synchronous replication. - format: int32 - type: integer - minSyncReplicas: - description: Minimum number of instances required in synchronous replication - with the primary. Undefined or 0 allow writes to complete when no - standby is available. - format: int32 - type: integer - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - description: Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere) - type: boolean - required: - - inProgress - type: object - postgresGID: - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: PostgreSQL Host Based Authentication rules (lines - to be appended to the pg_hba.conf file) - items: - type: string - type: array - type: object - primaryUpdateStrategy: - description: 'Strategy to follow to upgrade the primary server during - a rolling update procedure, after all replicas have been successfully - updated: it can be automated (`unsupervised` - default) or manual - (`supervised`)' - type: string - resources: - description: Resources requirements of every generated Pod. Please - refer to https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. 
- properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources - allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - startDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance to successfully start up (default 30) - format: int32 - type: integer - stopDelay: - description: The time in seconds that is allowed for a PostgreSQL - instance node to gracefully shutdown (default 30) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: 'AccessModes contains the desired access modes - the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' - items: - type: string - type: array - dataSource: - description: 'This field can be used to specify either: * - An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) * An existing - custom resource that implements data population (Alpha) - In order to use custom resource types that implement data - population, the AnyVolumeDataSource feature gate must be - enabled. If the provisioner or an external controller can - support the specified data source, it will create a new - volume based on the contents of the specified data source.' - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. If APIGroup is not specified, the specified - Kind must be in the core API group. For any other third-party - types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - resources: - description: 'Resources represents the minimum resources the - volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' - type: object - type: object - selector: - description: A label query over volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. - This array is replaced during a strategic merge - patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
- A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' - type: string - volumeMode: - description: volumeMode defines what type of volume is required - by the claim. Value of Filesystem is implied when not included - in claim spec. - type: string - volumeName: - description: VolumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: Size of the storage. Required if not already specified - in the PVC template. Changes to this field are automatically - reapplied to the created PVCs. Size cannot be decreased. - type: string - storageClass: - description: StorageClass to use for database data (`PGDATA`). - Applied after evaluating the PVC template, if available. If - not specified, generated PVCs will be satisfied by the default - storage class - type: string - required: - - size - type: object - superuserSecret: - description: The secret containing the superuser password. If not - defined a new secret will be created with a randomly generated password - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - instances - type: object - status: - description: 'Most recently observed status of the cluster. This data - may not be up to date. Populated by the system. Read-only. 
More info: - https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - currentPrimary: - description: Current primary instance - type: string - danglingPVC: - description: List of all the PVCs created by this cluster and still - available which are not attached to a Pod - items: - type: string - type: array - instances: - description: Total number of instances in the cluster - format: int32 - type: integer - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: Instances status - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - format: int32 - type: integer - licenseStatus: - description: Status of the license - properties: - isTrial: - description: True if we are using a trial license - type: boolean - licenseExpiration: - description: The expiration timestamp of the license key, after - which the operator will cease any reconciliation attempt on - the cluster. 
- format: date-time - type: string - licenseStatus: - description: Current status the license key of the cluster - type: string - repositoryAccess: - description: True if the license embeds a pull secret that can - be used to access the repositories - type: boolean - valid: - description: Whether the license key is valid or not - type: boolean - required: - - repositoryAccess - - valid - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: Total number of ready instances in the cluster - format: int32 - type: integer - targetPrimary: - description: Target primary instance, this is different from the previous - one during a switchover or a failover - type: string - writeService: - description: Current write pod - type: string - type: object - type: object - served: true - storage: false - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null - name: scheduledbackups.postgresql.k8s.enterprisedb.io -spec: - conversion: - strategy: None - group: postgresql.k8s.enterprisedb.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - preserveUnknownFields: false - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: 
.status.lastScheduleTime - name: Last Backup - type: date - name: v1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. 
More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.lastScheduleTime - name: Last Backup - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - cluster: - description: The cluster to backup - properties: - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - schedule: - description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - type: string - suspend: - description: If this backup is suspended of not - type: boolean - required: - - schedule - type: object - status: - description: 'Most recently observed status of the ScheduledBackup. This - data may not be up to date. Populated by the system. Read-only. More - info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: mbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: 
/mutate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: mcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: mbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: mclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /mutate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - 
scheduledbackups - sideEffects: None ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: postgresql-operator-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - admissionregistration.k8s.io - resources: - - validatingwebhookconfigurations - verbs: - - get - - list - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - update -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - 
verbs: - - create - - get - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - backups/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/finalizers - verbs: - - update -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - clusters/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.k8s.enterprisedb.io - resources: - - scheduledbackups/status - verbs: - - get - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - patch - - update -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: postgresql-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: postgresql-operator-manager -subjects: -- kind: ServiceAccount - name: postgresql-operator-manager - namespace: postgresql-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - 
control-plane: controller-manager - name: postgresql-operator-controller-manager - namespace: postgresql-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --enable-leader-election - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: quay.io/enterprisedb/cloud-native-postgresql:1.0.0 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/enterprisedb/cloud-native-postgresql:1.0.0 - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 30Mi - securityContext: - runAsUser: 1001 - serviceAccountName: postgresql-operator-manager - terminationGracePeriodSeconds: 10 ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: postgresql-operator-validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-backup - failurePolicy: Fail - name: vbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-cluster - failurePolicy: Fail - name: vcluster.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - 
service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackup.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-backup - failurePolicy: Fail - name: vbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-cluster - failurePolicy: Fail - name: vclusterv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: postgresql-operator-webhook-service - namespace: postgresql-operator-system - path: /validate-postgresql-k8s-enterprisedb-io-v1alpha1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackupv1alpha1.kb.io - rules: - - apiGroups: - - postgresql.k8s.enterprisedb.io - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None diff --git a/advocacy_docs/kubernetes/cloud_native_operator/samples/scheduled-backup-example.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/scheduled-backup-example.yaml old mode 100755 new mode 100644 diff --git 
a/advocacy_docs/kubernetes/cloud_native_operator/samples/subscription.yaml b/advocacy_docs/kubernetes/cloud_native_operator/samples/subscription.yaml old mode 100755 new mode 100644 index 7035da016b7..20ff6874ee6 --- a/advocacy_docs/kubernetes/cloud_native_operator/samples/subscription.yaml +++ b/advocacy_docs/kubernetes/cloud_native_operator/samples/subscription.yaml @@ -1,10 +1,10 @@ -apiVersion: operators.coreos.com/v1 -kind: Subscription -metadata: - name: cloud-native-postgresql - namespace: openshift-operators -spec: - channel: beta - name: cloud-native-postgresql - source: certified-operators - sourceNamespace: openshift-marketplace +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: cloud-native-postgresql + namespace: openshift-operators +spec: + channel: stable + name: cloud-native-postgresql + source: certified-operators + sourceNamespace: openshift-marketplace diff --git a/advocacy_docs/kubernetes/cloud_native_operator/security.mdx b/advocacy_docs/kubernetes/cloud_native_operator/security.mdx index 90288f20c6f..9baf637d668 100644 --- a/advocacy_docs/kubernetes/cloud_native_operator/security.mdx +++ b/advocacy_docs/kubernetes/cloud_native_operator/security.mdx @@ -79,9 +79,72 @@ For example, you can request an initial amount of RAM of 32MiB (scalable to 128M cpu: "100m" ``` -[//]: # ( TODO: we may want to explain what happens to a pod that exceedes the resource limits: CPU -> trottle; MEMORY -> kill ) +Memory requests and limits are associated with containers, but it is useful to think of a pod as having a memory request +and limit. The memory request for the pod is the sum of the memory requests for all the containers in the pod. -!!! Seealso "Managing Compute Resources for Containers" +Pod scheduling is based on requests and not limits. A pod is scheduled to run on a Node only if the Node has enough +available memory to satisfy the pod's memory request. 
+ +For each resource, we divide containers into 3 Quality of Service (QoS) classes, in decreasing order of priority: + +- *Guaranteed* +- *Burstable* +- *Best-Effort* + +For more details, please refer to the ["Configure Quality of Service for Pods"](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#qos-classes) section in the Kubernetes documentation. + +For a PostgreSQL workload it is recommended to set a "Guaranteed" QoS. + +In order to avoid resources related issues in Kubernetes, we can refer to the best practices for "out of resource" handling while creating +a cluster: + +- Specify your required values for memory and CPU in the resources section of the manifest file. + This way you can avoid the `OOM Killed` (where "OOM" stands for Out Of Memory) and `CPU throttle` or any other resources + related issues on running instances. +- In order for the pods of your cluster to get assigned to the "Guaranteed" QoS class, you must set limits and requests + for both memory and CPU to the same value. +- Specify your required PostgreSQL memory parameters consistently with the pod resources (like you would do in a VM or physical machine scenario - see below). +- Set up database server pods on a dedicated node using nodeSelector. + See the ["nodeSelector field of the affinityconfiguration resource on the API reference page"](api_reference.md#affinityconfiguration). + +You can refer the following example manifest: + +```yaml +apiVersion: postgresql.k8s.enterprisedb.io/v1 +kind: Cluster +metadata: + name: postgresql-resources +spec: + + instances: 3 + + postgresql: + parameters: + shared_buffers: "256MB" + + resources: + requests: + memory: "1024Mi" + cpu: 1 + limits: + memory: "1024Mi" + cpu: 1 + + storage: + size: 1Gi +``` + +In the above example, we have specified `shared_buffers` parameter with a value of `256MB` - i.e. 
how much memory is +dedicated to the PostgreSQL server for caching data (the default value for this parameter is `128MB` in case it's not defined). + +A reasonable starting value for `shared_buffers` is 25% of the memory in your system. +For example: if your `shared_buffers` is 256 MB, then the recommended value for your container memory size is 1 GB, +which means that within a pod all the containers will have a total of 1 GB memory that Kubernetes will always preserve, +enabling our containers to work as expected. +For more details, please refer to the ["Resource Consumption"](https://www.postgresql.org/docs/current/runtime-config-resource.html) +section in the PostgreSQL documentation. + +!!! See also "Managing Compute Resources for Containers" For more details on resource management, please refer to the ["Managing Compute Resources for Containers"](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) page from the Kubernetes documentation. From a1fcdb93f3048e6ddba01e9b2422cfc724edb3e1 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Wed, 3 Mar 2021 14:17:05 -0500 Subject: [PATCH 06/28] Update README to make it clear config-sources is not required Former-commit-id: bb340da46704049c3a228c266fd07cdc895976f4 --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index ebb54fb46bb..f568760d0f6 100644 --- a/README.md +++ b/README.md @@ -36,8 +36,6 @@ We recommend using MacOS to work with the EDB Docs application. 1. Pull the shared icon files down with `git submodule update --init`. -1. Now select which sources you want with `yarn config-sources`. - 1. And finally, you can start up the site locally with `yarn develop`, which should make it live at `http://localhost:8000/`. Huzzah! 
### Installation of PDF / Doc Conversion Tools (optional) @@ -64,7 +62,7 @@ If you are a Windows user, you can work with Docs without installing it locally ### Configuring Which Sources are Loaded -When doing local development of the site or advocacy content, you may want to load other sources to experience the full site. The more sources you load, the slower the site will build, so it's recommended to typically only load the content you'll be working with the most. +By default, all document sources will be loaded into the app during development. It's possible to set up a configuration file, `dev-sources.json`, to only load specific sources, but this is not required. #### `yarn config-sources` From 35468d90ae9aa19bb2185c4fa13d3f25c6b2cf7a Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Thu, 4 Mar 2021 15:54:30 +0530 Subject: [PATCH 07/28] Windows 32 and 64 update Former-commit-id: 1b7b855b187f4a3983a9b657777bb67a3665e049 --- .../01_supported_database_server_versions.mdx | 10 +- src/components/icon/iconNames.js | 176 ++++++ src/components/icon/iconType.js | 528 ++++++++++++++++++ 3 files changed, 709 insertions(+), 5 deletions(-) diff --git a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx index 294a311d54b..7d4ba9703b0 100644 --- a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx +++ b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx @@ -22,13 +22,13 @@ For detailed information, please see the EDB Postgres Advanced Server Installati **Windows 32:** -| EDB Postgres Advanced Server/PostgreSQL Version | Language Pack Version | Procedural Language Version | +| PostgreSQL Version | Language Pack Version | Procedural Language Version | | ----------------------------------------------- | --------------------- | ------------------------------ | | `9.6, 10` | 1.0 | Perl 5.26, Python 3.7, 
Tcl 8.6 | **Windows 64:** -| EDB Postgres Advanced Server/PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ----------------------------------------------- | --------------------- | ------------------------------ | -| `PostgreSQL 9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.8 | -| `EDB Postgres Advanced Server 12` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | +| EDB Postgres Advanced Server/PostgreSQL Version | Language Pack Version | Procedural Language Version | +| ------------------------------------------------- | --------------------- | ------------------------------ | +| `PostgreSQL 9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.8 | +| `EDB Postgres Advanced Server 9.6, 10, 11, 12, 13`| 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | diff --git a/src/components/icon/iconNames.js b/src/components/icon/iconNames.js index 1a8eaeccf47..d029d501073 100644 --- a/src/components/icon/iconNames.js +++ b/src/components/icon/iconNames.js @@ -1,9 +1,185 @@ export default { + ALERT: 'alert', + ANGULARJS_COLOR: 'angularjscolor', + ANNOUNCE: 'announce', + ARROW_LEFT: 'arrowleft', + ARROW_RIGHT: 'arrowright', BARMAN: 'barman', + BIG_DATA: 'bigdata', + BRAIN_CIRCUIT: 'braincircuit', + BRIEFCASE: 'briefcase', + BUSINESSMAN: 'businessman', + BUSINESSWOMAN: 'businesswoman', + C_COLOR: 'ccolor', + C_PLUS_PLUS_COLOR: 'cpluspluscolor', + C_SHARP_COLOR: 'csharpcolor', + C_SHARP: 'csharp', + CASE_STUDY: 'casestudy', + CENTOS_COLOR: 'centoscolor', + CERTIFICATE: 'certificate', + CHANGE: 'change', + CHECKMARK: 'checkmark', + CHEVRON_DOWN: 'chevrondown', + CHEVRON_RIGHT: 'chevronright', + CLOSE: 'close', + CLOUD_CHECKED: 'cloudchecked', + CLOUD_DB: 'clouddb', + CLOUD_DBA: 'clouddba', + CLOUD_PRIVATE: 'cloudprivate', + CLOUD_PUBLIC: 'cloudpublic', + CODE_WRITING: 'codewriting', COFFEE: 'coffee', + CONFIG_MANAGEMENT: 'configmanagement', + CONFLICT: 'conflict', + CONNECT: 'connect', + CONSOLE: 'console', + CONTACT: 'contact', + CONTAINER: 'container', 
+ CONTROL: 'control', + CONVERT: 'convert', + CROWN: 'crown', + CUBES: 'cubes', + CYCLE: 'cycle', + DATA_TRANSFER: 'datatransfer', + DATABASE_ADMIN: 'databaseadmin', + DATABASE_BACKUP: 'databasebackup', + DATABASE: 'database', + DEBIAN_COLOR: 'debiancolor', + DELIVER_LOVE: 'deliverlove', + DESIGN: 'design', + DEVELOPER: 'developer', + DIPLOMA: 'diploma', + DJANGO_COLOR: 'djangocolor', + DOCKER_COLOR: 'dockercolor', + DOCKER_CONTAINER: 'dockercontainer', + DOCS: 'docs', + DOT_NET_COLOR: 'dotnetcolor', + DOTTED_BOX: 'dottedbox', + DOWNLOAD: 'download', + DRIVES: 'drives', + DUPLICATE: 'duplicate', + EARTH: 'earth', + EASY: 'easy', + EDB_ARK: 'edbark', + EDB_BADGE: 'edbbadge', + EDB_BART: 'edbbart', + EDB_DASHBOARD: 'edbdashboard', + EDB_DOCS_LOGO_DISC_DARK: 'edbdocslogodiscdark', + EDB_EFM: 'edbefm', + EDB_EPAS: 'edbepas', + EDB_KUBERNETES: 'edbkubernetes', + EDB_LOGO_DISC_DARK: 'edblogodiscdark', + EDB_LOGO_SYMBOL_BADGE: 'edblogosymbolbadge', + EDB_MIGRATION_PORTAL: 'edbmigrationportal', + EDB_MIGRATION_TOOLKIT: 'edbmigrationtoolkit', + EDB_PEM: 'edbpem', EDB_REPLICATE: 'edbreplicate', + EDB_REPLICATION: 'edbreplication', + EDB_SYMBOL: 'edbsymbol', + ELLIPSIS: 'ellipsis', + ENERGY: 'energy', + ENTERPRISE: 'enterprise', + EXPORT: 'export', EXTERNAL_LINK: 'externallink', + FILE: 'file', + GLOBE: 'globe', + GOLANG_COLOR: 'golangcolor', + GUI: 'gui', + HADOOP: 'hadoop', + HAMBURGER: 'hamburger', + HANDSHAKE: 'handshake', + HARD_TO_FIND: 'hardtofind', + HASKELL_COLOR: 'haskellcolor', + HIGH_AVAILABILITY: 'highavailability', + HTML_COLOR: 'htmlcolor', + IDEA_SHARING: 'ideasharing', + IMPROVE: 'improve', + INSTALL: 'install', + INSTANCES: 'instances', + IOS_COLOR: 'ioscolor', + JAVA_COLOR: 'javacolor', + JAVA: 'java', + JAVASCRIPT_COLOR: 'javascriptcolor', + JQUERY_COLOR: 'jquerycolor', + KNIGHT: 'knight', + KOTLIN_COLOR: 'kotlincolor', + KUBERNETES: 'kubernetes', + LAPTOP_CONFIG: 'laptopconfig', + LARAVEL: 'laravel', + LEADER: 'leader', + LEARNING: 'learning', + LINKEDIN: 
'linkedin', + LINUX_COLOR: 'linuxcolor', + MACOS_COLOR: 'macoscolor', + MANAGED_OPERATIONS: 'managedoperations', + MIGRATE: 'migrate', + MISSION: 'mission', + MODULE: 'module', + MOON: 'moon', + MORE_TEAM: 'moreteam', + NAME_TAG: 'nametag', + NETWORK: 'network', + NETWORK2: 'network2', + NEW_WINDOW: 'newwindow', + NEWS: 'news', + NODEJS_COLOR: 'nodejscolor', + NODEJS: 'nodejs', + NOT_FOUND: 'notfound', + OPTIONS: 'options', + PANDAS: 'pandas', + PARTNER: 'partner', + PDF: 'pdf', + PEOPLE: 'people', + PERL_COLOR: 'perlcolor', + PHP_COLOR: 'phpcolor', + PHP: 'php', + PIGGY_BANK: 'piggybank', + PLANNER: 'planner', + POSTGRES_SUPPORT: 'postgressupport', + POSTGRESQL: 'postgresql', + PRESENTATION: 'presentation', + PROCESS_IMPROVEMENT: 'processimprovement', + PULSE: 'pulse', + PYTHON_COLOR: 'pythoncolor', + PYTHON: 'python', + QUOTE_LEFT: 'quoteleft', + QUOTE_RIGHT: 'quoteright', + REACT_NATIVE_COLOR: 'reactnativecolor', + REDHAT_COLOR: 'redhatcolor', + REDHAT: 'redhat', + REMOTE_DBA: 'remotedba', + REPLICATION: 'replication', + ROCKET: 'rocket', + RUBY_COLOR: 'rubycolor', + RUBY: 'ruby', + SEARCH: 'search', + SECURITY_CONFIG: 'securityconfig', + SERVER_ERROR: 'servererror', + SPEED: 'speed', + SQL: 'sql', + STACK: 'stack', + STAR: 'star', + STORE: 'store', + SUBSET: 'subset', + SUN: 'sun', + SUPPORT_PORTAL: 'supportportal', + SUPPORT: 'support', + SUSE_COLOR: 'susecolor', + SWIFT_COLOR: 'swiftcolor', SYNCHRONIZE: 'synchronize', + TEN_YEARS: 'tenyears', TICKET_STAR: 'ticketstar', + TICKET: 'ticket', + TOOLS: 'tools', TRAINING: 'training', + TREEHOUSE: 'treehouse', + TUTORIAL: 'tutorial', + TYPESCRIPT_COLOR: 'typescriptcolor', + UBUNTU_COLOR: 'ubuntucolor', + UNLOCK: 'unlock', + VENN: 'venn', + VUE_JS_COLOR: 'vuejscolor', + WEB_SECURITY: 'websecurity', + WEBINAR: 'webinar', + WINDOWS_COLOR: 'windowscolor', }; diff --git a/src/components/icon/iconType.js b/src/components/icon/iconType.js index fd47cda8572..d4a7c33e320 100644 --- a/src/components/icon/iconType.js +++ 
b/src/components/icon/iconType.js @@ -1,13 +1,189 @@ import React from 'react'; import iconNames from './iconNames'; +import AlertSvg from '../../../static/edb-icons/alert.svg'; +import AngularjsColorSvg from '../../../static/edb-icons/angularjs-color.svg'; +import AnnounceSvg from '../../../static/edb-icons/announce.svg'; +import ArrowLeftSvg from '../../../static/edb-icons/arrow-left.svg'; +import ArrowRightSvg from '../../../static/edb-icons/arrow-right.svg'; import BarmanSvg from '../../../static/edb-icons/barman.svg'; +import BigDataSvg from '../../../static/edb-icons/big-data.svg'; +import BrainCircuitSvg from '../../../static/edb-icons/brain-circuit.svg'; +import BriefcaseSvg from '../../../static/edb-icons/briefcase.svg'; +import BusinessmanSvg from '../../../static/edb-icons/businessman.svg'; +import BusinesswomanSvg from '../../../static/edb-icons/businesswoman.svg'; +import CColorSvg from '../../../static/edb-icons/c-color.svg'; +import CPlusPlusColorSvg from '../../../static/edb-icons/c-plus-plus-color.svg'; +import CSharpColorSvg from '../../../static/edb-icons/c-sharp-color.svg'; +import CSharpSvg from '../../../static/edb-icons/c-sharp.svg'; +import CaseStudySvg from '../../../static/edb-icons/case-study.svg'; +import CentosColorSvg from '../../../static/edb-icons/centos-color.svg'; +import CertificateSvg from '../../../static/edb-icons/certificate.svg'; +import ChangeSvg from '../../../static/edb-icons/change.svg'; +import CheckmarkSvg from '../../../static/edb-icons/checkmark.svg'; +import ChevronDownSvg from '../../../static/edb-icons/chevron-down.svg'; +import ChevronRightSvg from '../../../static/edb-icons/chevron-right.svg'; +import CloseSvg from '../../../static/edb-icons/close.svg'; +import CloudCheckedSvg from '../../../static/edb-icons/cloud-checked.svg'; +import CloudDbSvg from '../../../static/edb-icons/cloud-db.svg'; +import CloudDbaSvg from '../../../static/edb-icons/cloud-dba.svg'; +import CloudPrivateSvg from 
'../../../static/edb-icons/cloud-private.svg'; +import CloudPublicSvg from '../../../static/edb-icons/cloud-public.svg'; +import CodeWritingSvg from '../../../static/edb-icons/code-writing.svg'; import CoffeeSvg from '../../../static/edb-icons/coffee.svg'; +import ConfigManagementSvg from '../../../static/edb-icons/config-management.svg'; +import ConflictSvg from '../../../static/edb-icons/conflict.svg'; +import ConnectSvg from '../../../static/edb-icons/connect.svg'; +import ConsoleSvg from '../../../static/edb-icons/console.svg'; +import ContactSvg from '../../../static/edb-icons/contact.svg'; +import ContainerSvg from '../../../static/edb-icons/container.svg'; +import ControlSvg from '../../../static/edb-icons/control.svg'; +import ConvertSvg from '../../../static/edb-icons/convert.svg'; +import CrownSvg from '../../../static/edb-icons/crown.svg'; +import CubesSvg from '../../../static/edb-icons/cubes.svg'; +import CycleSvg from '../../../static/edb-icons/cycle.svg'; +import DataTransferSvg from '../../../static/edb-icons/data-transfer.svg'; +import DatabaseAdminSvg from '../../../static/edb-icons/database-admin.svg'; +import DatabaseBackupSvg from '../../../static/edb-icons/database-backup.svg'; +import DatabaseSvg from '../../../static/edb-icons/database.svg'; +import DebianColorSvg from '../../../static/edb-icons/debian-color.svg'; +import DeliverLoveSvg from '../../../static/edb-icons/deliver-love.svg'; +import DesignSvg from '../../../static/edb-icons/design.svg'; +import DeveloperSvg from '../../../static/edb-icons/developer.svg'; +import DiplomaSvg from '../../../static/edb-icons/diploma.svg'; +import DjangoColorSvg from '../../../static/edb-icons/django-color.svg'; +import DockerColorSvg from '../../../static/edb-icons/docker-color.svg'; +import DockerContainerSvg from '../../../static/edb-icons/docker-container.svg'; +import DocsSvg from '../../../static/edb-icons/docs.svg'; +import DotNetColorSvg from '../../../static/edb-icons/dot-net-color.svg'; 
+import DottedBoxSvg from '../../../static/edb-icons/dotted-box.svg'; +import DownloadSvg from '../../../static/edb-icons/download.svg'; +import DrivesSvg from '../../../static/edb-icons/drives.svg'; +import DuplicateSvg from '../../../static/edb-icons/duplicate.svg'; +import EarthSvg from '../../../static/edb-icons/earth.svg'; +import EasySvg from '../../../static/edb-icons/easy.svg'; +import EdbArkSvg from '../../../static/edb-icons/edb-ark.svg'; +import EdbBadgeSvg from '../../../static/edb-icons/edb-badge.svg'; +import EdbBartSvg from '../../../static/edb-icons/edb-bart.svg'; +import EdbDashboardSvg from '../../../static/edb-icons/edb-dashboard.svg'; +import EdbDocsLogoDiscDarkSvg from '../../../static/edb-icons/edb-docs-logo-disc-dark.svg'; +import EdbEfmSvg from '../../../static/edb-icons/edb-efm.svg'; +import EdbEpasSvg from '../../../static/edb-icons/edb-epas.svg'; +import EdbKubernetesSvg from '../../../static/edb-icons/edb-kubernetes.svg'; +import EdbLogoDiscDarkSvg from '../../../static/edb-icons/edb-logo-disc-dark.svg'; +import EdbLogoSymbolBadgeSvg from '../../../static/edb-icons/edb-logo-symbol-badge.svg'; +import EdbMigrationPortalSvg from '../../../static/edb-icons/edb-migration-portal.svg'; +import EdbMigrationToolkitSvg from '../../../static/edb-icons/edb-migration-toolkit.svg'; +import EdbPemSvg from '../../../static/edb-icons/edb-pem.svg'; import EdbReplicateSvg from '../../../static/edb-icons/edb-replicate.svg'; +import EdbReplicationSvg from '../../../static/edb-icons/edb-replication.svg'; +import EdbSymbolSvg from '../../../static/edb-icons/edb-symbol.svg'; +import EllipsisSvg from '../../../static/edb-icons/ellipsis.svg'; +import EnergySvg from '../../../static/edb-icons/energy.svg'; +import EnterpriseSvg from '../../../static/edb-icons/enterprise.svg'; +import ExportSvg from '../../../static/edb-icons/export.svg'; import ExternalLinkSvg from '../../../static/edb-icons/external-link.svg'; +import FileSvg from 
'../../../static/edb-icons/file.svg'; +import GlobeSvg from '../../../static/edb-icons/globe.svg'; +import GolangColorSvg from '../../../static/edb-icons/golang-color.svg'; +import GuiSvg from '../../../static/edb-icons/gui.svg'; +import HadoopSvg from '../../../static/edb-icons/hadoop.svg'; +import HamburgerSvg from '../../../static/edb-icons/hamburger.svg'; +import HandshakeSvg from '../../../static/edb-icons/handshake.svg'; +import HardToFindSvg from '../../../static/edb-icons/hard-to-find.svg'; +import HaskellColorSvg from '../../../static/edb-icons/haskell-color.svg'; +import HighAvailabilitySvg from '../../../static/edb-icons/high-availability.svg'; +import HtmlColorSvg from '../../../static/edb-icons/html-color.svg'; +import IdeaSharingSvg from '../../../static/edb-icons/idea-sharing.svg'; +import ImproveSvg from '../../../static/edb-icons/improve.svg'; +import InstallSvg from '../../../static/edb-icons/install.svg'; +import InstancesSvg from '../../../static/edb-icons/instances.svg'; +import IosColorSvg from '../../../static/edb-icons/ios-color.svg'; +import JavaColorSvg from '../../../static/edb-icons/java-color.svg'; +import JavaSvg from '../../../static/edb-icons/java.svg'; +import JavascriptColorSvg from '../../../static/edb-icons/javascript-color.svg'; +import JqueryColorSvg from '../../../static/edb-icons/jquery-color.svg'; +import KnightSvg from '../../../static/edb-icons/knight.svg'; +import KotlinColorSvg from '../../../static/edb-icons/kotlin-color.svg'; +import KubernetesSvg from '../../../static/edb-icons/kubernetes.svg'; +import LaptopConfigSvg from '../../../static/edb-icons/laptop-config.svg'; +import LaravelSvg from '../../../static/edb-icons/laravel.svg'; +import LeaderSvg from '../../../static/edb-icons/leader.svg'; +import LearningSvg from '../../../static/edb-icons/learning.svg'; +import LinkedinSvg from '../../../static/edb-icons/linkedin.svg'; +import LinuxColorSvg from '../../../static/edb-icons/linux-color.svg'; +import MacosColorSvg 
from '../../../static/edb-icons/macos-color.svg'; +import ManagedOperationsSvg from '../../../static/edb-icons/managed-operations.svg'; +import MigrateSvg from '../../../static/edb-icons/migrate.svg'; +import MissionSvg from '../../../static/edb-icons/mission.svg'; +import ModuleSvg from '../../../static/edb-icons/module.svg'; +import MoonSvg from '../../../static/edb-icons/moon.svg'; +import MoreTeamSvg from '../../../static/edb-icons/more-team.svg'; +import NameTagSvg from '../../../static/edb-icons/name-tag.svg'; +import NetworkSvg from '../../../static/edb-icons/network.svg'; +import Network2Svg from '../../../static/edb-icons/network2.svg'; +import NewWindowSvg from '../../../static/edb-icons/new-window.svg'; +import NewsSvg from '../../../static/edb-icons/news.svg'; +import NodejsColorSvg from '../../../static/edb-icons/nodejs-color.svg'; +import NodejsSvg from '../../../static/edb-icons/nodejs.svg'; +import NotFoundSvg from '../../../static/edb-icons/not-found.svg'; +import OptionsSvg from '../../../static/edb-icons/options.svg'; +import PandasSvg from '../../../static/edb-icons/pandas.svg'; +import PartnerSvg from '../../../static/edb-icons/partner.svg'; +import PdfSvg from '../../../static/edb-icons/pdf.svg'; +import PeopleSvg from '../../../static/edb-icons/people.svg'; +import PerlColorSvg from '../../../static/edb-icons/perl-color.svg'; +import PhpColorSvg from '../../../static/edb-icons/php-color.svg'; +import PhpSvg from '../../../static/edb-icons/php.svg'; +import PiggyBankSvg from '../../../static/edb-icons/piggy-bank.svg'; +import PlannerSvg from '../../../static/edb-icons/planner.svg'; +import PostgresSupportSvg from '../../../static/edb-icons/postgres-support.svg'; +import PostgresqlSvg from '../../../static/edb-icons/postgresql.svg'; +import PresentationSvg from '../../../static/edb-icons/presentation.svg'; +import ProcessImprovementSvg from '../../../static/edb-icons/process-improvement.svg'; +import PulseSvg from 
'../../../static/edb-icons/pulse.svg'; +import PythonColorSvg from '../../../static/edb-icons/python-color.svg'; +import PythonSvg from '../../../static/edb-icons/python.svg'; +import QuoteLeftSvg from '../../../static/edb-icons/quote-left.svg'; +import QuoteRightSvg from '../../../static/edb-icons/quote-right.svg'; +import ReactNativeColorSvg from '../../../static/edb-icons/react-native-color.svg'; +import RedhatColorSvg from '../../../static/edb-icons/redhat-color.svg'; +import RedhatSvg from '../../../static/edb-icons/redhat.svg'; +import RemoteDbaSvg from '../../../static/edb-icons/remote-dba.svg'; +import ReplicationSvg from '../../../static/edb-icons/replication.svg'; +import RocketSvg from '../../../static/edb-icons/rocket.svg'; +import RubyColorSvg from '../../../static/edb-icons/ruby-color.svg'; +import RubySvg from '../../../static/edb-icons/ruby.svg'; +import SearchSvg from '../../../static/edb-icons/search.svg'; +import SecurityConfigSvg from '../../../static/edb-icons/security-config.svg'; +import ServerErrorSvg from '../../../static/edb-icons/server-error.svg'; +import SpeedSvg from '../../../static/edb-icons/speed.svg'; +import SqlSvg from '../../../static/edb-icons/sql.svg'; +import StackSvg from '../../../static/edb-icons/stack.svg'; +import StarSvg from '../../../static/edb-icons/star.svg'; +import StoreSvg from '../../../static/edb-icons/store.svg'; +import SubsetSvg from '../../../static/edb-icons/subset.svg'; +import SunSvg from '../../../static/edb-icons/sun.svg'; +import SupportPortalSvg from '../../../static/edb-icons/support-portal.svg'; +import SupportSvg from '../../../static/edb-icons/support.svg'; +import SuseColorSvg from '../../../static/edb-icons/suse-color.svg'; +import SwiftColorSvg from '../../../static/edb-icons/swift-color.svg'; import SynchronizeSvg from '../../../static/edb-icons/synchronize.svg'; +import TenYearsSvg from '../../../static/edb-icons/ten-years.svg'; import TicketStarSvg from 
'../../../static/edb-icons/ticket-star.svg'; +import TicketSvg from '../../../static/edb-icons/ticket.svg'; +import ToolsSvg from '../../../static/edb-icons/tools.svg'; import TrainingSvg from '../../../static/edb-icons/training.svg'; +import TreehouseSvg from '../../../static/edb-icons/treehouse.svg'; +import TutorialSvg from '../../../static/edb-icons/tutorial.svg'; +import TypescriptColorSvg from '../../../static/edb-icons/typescript-color.svg'; +import UbuntuColorSvg from '../../../static/edb-icons/ubuntu-color.svg'; +import UnlockSvg from '../../../static/edb-icons/unlock.svg'; +import VennSvg from '../../../static/edb-icons/venn.svg'; +import VueJsColorSvg from '../../../static/edb-icons/vue-js-color.svg'; +import WebSecuritySvg from '../../../static/edb-icons/web-security.svg'; +import WebinarSvg from '../../../static/edb-icons/webinar.svg'; +import WindowsColorSvg from '../../../static/edb-icons/windows-color.svg'; function formatIconName(name) { return name && name.replace(/ /g, '').toLowerCase(); @@ -15,20 +191,372 @@ function formatIconName(name) { export default function IconType({ iconName, ...rest }) { switch (formatIconName(iconName)) { + case iconNames.ALERT: + return ; + case iconNames.ANGULARJS_COLOR: + return ; + case iconNames.ANNOUNCE: + return ; + case iconNames.ARROW_LEFT: + return ; + case iconNames.ARROW_RIGHT: + return ; case iconNames.BARMAN: return ; + case iconNames.BIG_DATA: + return ; + case iconNames.BRAIN_CIRCUIT: + return ; + case iconNames.BRIEFCASE: + return ; + case iconNames.BUSINESSMAN: + return ; + case iconNames.BUSINESSWOMAN: + return ; + case iconNames.C_COLOR: + return ; + case iconNames.C_PLUS_PLUS_COLOR: + return ; + case iconNames.C_SHARP_COLOR: + return ; + case iconNames.C_SHARP: + return ; + case iconNames.CASE_STUDY: + return ; + case iconNames.CENTOS_COLOR: + return ; + case iconNames.CERTIFICATE: + return ; + case iconNames.CHANGE: + return ; + case iconNames.CHECKMARK: + return ; + case iconNames.CHEVRON_DOWN: + 
return ; + case iconNames.CHEVRON_RIGHT: + return ; + case iconNames.CLOSE: + return ; + case iconNames.CLOUD_CHECKED: + return ; + case iconNames.CLOUD_DB: + return ; + case iconNames.CLOUD_DBA: + return ; + case iconNames.CLOUD_PRIVATE: + return ; + case iconNames.CLOUD_PUBLIC: + return ; + case iconNames.CODE_WRITING: + return ; case iconNames.COFFEE: return ; + case iconNames.CONFIG_MANAGEMENT: + return ; + case iconNames.CONFLICT: + return ; + case iconNames.CONNECT: + return ; + case iconNames.CONSOLE: + return ; + case iconNames.CONTACT: + return ; + case iconNames.CONTAINER: + return ; + case iconNames.CONTROL: + return ; + case iconNames.CONVERT: + return ; + case iconNames.CROWN: + return ; + case iconNames.CUBES: + return ; + case iconNames.CYCLE: + return ; + case iconNames.DATA_TRANSFER: + return ; + case iconNames.DATABASE_ADMIN: + return ; + case iconNames.DATABASE_BACKUP: + return ; + case iconNames.DATABASE: + return ; + case iconNames.DEBIAN_COLOR: + return ; + case iconNames.DELIVER_LOVE: + return ; + case iconNames.DESIGN: + return ; + case iconNames.DEVELOPER: + return ; + case iconNames.DIPLOMA: + return ; + case iconNames.DJANGO_COLOR: + return ; + case iconNames.DOCKER_COLOR: + return ; + case iconNames.DOCKER_CONTAINER: + return ; + case iconNames.DOCS: + return ; + case iconNames.DOT_NET_COLOR: + return ; + case iconNames.DOTTED_BOX: + return ; + case iconNames.DOWNLOAD: + return ; + case iconNames.DRIVES: + return ; + case iconNames.DUPLICATE: + return ; + case iconNames.EARTH: + return ; + case iconNames.EASY: + return ; + case iconNames.EDB_ARK: + return ; + case iconNames.EDB_BADGE: + return ; + case iconNames.EDB_BART: + return ; + case iconNames.EDB_DASHBOARD: + return ; + case iconNames.EDB_DOCS_LOGO_DISC_DARK: + return ; + case iconNames.EDB_EFM: + return ; + case iconNames.EDB_EPAS: + return ; + case iconNames.EDB_KUBERNETES: + return ; + case iconNames.EDB_LOGO_DISC_DARK: + return ; + case iconNames.EDB_LOGO_SYMBOL_BADGE: + 
return ; + case iconNames.EDB_MIGRATION_PORTAL: + return ; + case iconNames.EDB_MIGRATION_TOOLKIT: + return ; + case iconNames.EDB_PEM: + return ; case iconNames.EDB_REPLICATE: return ; + case iconNames.EDB_REPLICATION: + return ; + case iconNames.EDB_SYMBOL: + return ; + case iconNames.ELLIPSIS: + return ; + case iconNames.ENERGY: + return ; + case iconNames.ENTERPRISE: + return ; + case iconNames.EXPORT: + return ; case iconNames.EXTERNAL_LINK: return ; + case iconNames.FILE: + return ; + case iconNames.GLOBE: + return ; + case iconNames.GOLANG_COLOR: + return ; + case iconNames.GUI: + return ; + case iconNames.HADOOP: + return ; + case iconNames.HAMBURGER: + return ; + case iconNames.HANDSHAKE: + return ; + case iconNames.HARD_TO_FIND: + return ; + case iconNames.HASKELL_COLOR: + return ; + case iconNames.HIGH_AVAILABILITY: + return ; + case iconNames.HTML_COLOR: + return ; + case iconNames.IDEA_SHARING: + return ; + case iconNames.IMPROVE: + return ; + case iconNames.INSTALL: + return ; + case iconNames.INSTANCES: + return ; + case iconNames.IOS_COLOR: + return ; + case iconNames.JAVA_COLOR: + return ; + case iconNames.JAVA: + return ; + case iconNames.JAVASCRIPT_COLOR: + return ; + case iconNames.JQUERY_COLOR: + return ; + case iconNames.KNIGHT: + return ; + case iconNames.KOTLIN_COLOR: + return ; + case iconNames.KUBERNETES: + return ; + case iconNames.LAPTOP_CONFIG: + return ; + case iconNames.LARAVEL: + return ; + case iconNames.LEADER: + return ; + case iconNames.LEARNING: + return ; + case iconNames.LINKEDIN: + return ; + case iconNames.LINUX_COLOR: + return ; + case iconNames.MACOS_COLOR: + return ; + case iconNames.MANAGED_OPERATIONS: + return ; + case iconNames.MIGRATE: + return ; + case iconNames.MISSION: + return ; + case iconNames.MODULE: + return ; + case iconNames.MOON: + return ; + case iconNames.MORE_TEAM: + return ; + case iconNames.NAME_TAG: + return ; + case iconNames.NETWORK: + return ; + case iconNames.NETWORK2: + return ; + case 
iconNames.NEW_WINDOW: + return ; + case iconNames.NEWS: + return ; + case iconNames.NODEJS_COLOR: + return ; + case iconNames.NODEJS: + return ; + case iconNames.NOT_FOUND: + return ; + case iconNames.OPTIONS: + return ; + case iconNames.PANDAS: + return ; + case iconNames.PARTNER: + return ; + case iconNames.PDF: + return ; + case iconNames.PEOPLE: + return ; + case iconNames.PERL_COLOR: + return ; + case iconNames.PHP_COLOR: + return ; + case iconNames.PHP: + return ; + case iconNames.PIGGY_BANK: + return ; + case iconNames.PLANNER: + return ; + case iconNames.POSTGRES_SUPPORT: + return ; + case iconNames.POSTGRESQL: + return ; + case iconNames.PRESENTATION: + return ; + case iconNames.PROCESS_IMPROVEMENT: + return ; + case iconNames.PULSE: + return ; + case iconNames.PYTHON_COLOR: + return ; + case iconNames.PYTHON: + return ; + case iconNames.QUOTE_LEFT: + return ; + case iconNames.QUOTE_RIGHT: + return ; + case iconNames.REACT_NATIVE_COLOR: + return ; + case iconNames.REDHAT_COLOR: + return ; + case iconNames.REDHAT: + return ; + case iconNames.REMOTE_DBA: + return ; + case iconNames.REPLICATION: + return ; + case iconNames.ROCKET: + return ; + case iconNames.RUBY_COLOR: + return ; + case iconNames.RUBY: + return ; + case iconNames.SEARCH: + return ; + case iconNames.SECURITY_CONFIG: + return ; + case iconNames.SERVER_ERROR: + return ; + case iconNames.SPEED: + return ; + case iconNames.SQL: + return ; + case iconNames.STACK: + return ; + case iconNames.STAR: + return ; + case iconNames.STORE: + return ; + case iconNames.SUBSET: + return ; + case iconNames.SUN: + return ; + case iconNames.SUPPORT_PORTAL: + return ; + case iconNames.SUPPORT: + return ; + case iconNames.SUSE_COLOR: + return ; + case iconNames.SWIFT_COLOR: + return ; case iconNames.SYNCHRONIZE: return ; + case iconNames.TEN_YEARS: + return ; case iconNames.TICKET_STAR: return ; + case iconNames.TICKET: + return ; + case iconNames.TOOLS: + return ; case iconNames.TRAINING: return ; + case 
iconNames.TREEHOUSE: + return ; + case iconNames.TUTORIAL: + return ; + case iconNames.TYPESCRIPT_COLOR: + return ; + case iconNames.UBUNTU_COLOR: + return ; + case iconNames.UNLOCK: + return ; + case iconNames.VENN: + return ; + case iconNames.VUE_JS_COLOR: + return ; + case iconNames.WEB_SECURITY: + return ; + case iconNames.WEBINAR: + return ; + case iconNames.WINDOWS_COLOR: + return ; default: return null; } From 32d918a7f3d3534fbd4103826ad52098b40687e4 Mon Sep 17 00:00:00 2001 From: Robert Stringer Date: Thu, 4 Mar 2021 11:03:08 +0000 Subject: [PATCH 08/28] icons Former-commit-id: 65771385d80d46df559e878e675dfaa386acf9ea --- src/components/icon/iconNames.js | 176 +++++++++++ src/components/icon/iconType.js | 528 +++++++++++++++++++++++++++++++ 2 files changed, 704 insertions(+) diff --git a/src/components/icon/iconNames.js b/src/components/icon/iconNames.js index 1a8eaeccf47..d029d501073 100644 --- a/src/components/icon/iconNames.js +++ b/src/components/icon/iconNames.js @@ -1,9 +1,185 @@ export default { + ALERT: 'alert', + ANGULARJS_COLOR: 'angularjscolor', + ANNOUNCE: 'announce', + ARROW_LEFT: 'arrowleft', + ARROW_RIGHT: 'arrowright', BARMAN: 'barman', + BIG_DATA: 'bigdata', + BRAIN_CIRCUIT: 'braincircuit', + BRIEFCASE: 'briefcase', + BUSINESSMAN: 'businessman', + BUSINESSWOMAN: 'businesswoman', + C_COLOR: 'ccolor', + C_PLUS_PLUS_COLOR: 'cpluspluscolor', + C_SHARP_COLOR: 'csharpcolor', + C_SHARP: 'csharp', + CASE_STUDY: 'casestudy', + CENTOS_COLOR: 'centoscolor', + CERTIFICATE: 'certificate', + CHANGE: 'change', + CHECKMARK: 'checkmark', + CHEVRON_DOWN: 'chevrondown', + CHEVRON_RIGHT: 'chevronright', + CLOSE: 'close', + CLOUD_CHECKED: 'cloudchecked', + CLOUD_DB: 'clouddb', + CLOUD_DBA: 'clouddba', + CLOUD_PRIVATE: 'cloudprivate', + CLOUD_PUBLIC: 'cloudpublic', + CODE_WRITING: 'codewriting', COFFEE: 'coffee', + CONFIG_MANAGEMENT: 'configmanagement', + CONFLICT: 'conflict', + CONNECT: 'connect', + CONSOLE: 'console', + CONTACT: 'contact', + CONTAINER: 
'container', + CONTROL: 'control', + CONVERT: 'convert', + CROWN: 'crown', + CUBES: 'cubes', + CYCLE: 'cycle', + DATA_TRANSFER: 'datatransfer', + DATABASE_ADMIN: 'databaseadmin', + DATABASE_BACKUP: 'databasebackup', + DATABASE: 'database', + DEBIAN_COLOR: 'debiancolor', + DELIVER_LOVE: 'deliverlove', + DESIGN: 'design', + DEVELOPER: 'developer', + DIPLOMA: 'diploma', + DJANGO_COLOR: 'djangocolor', + DOCKER_COLOR: 'dockercolor', + DOCKER_CONTAINER: 'dockercontainer', + DOCS: 'docs', + DOT_NET_COLOR: 'dotnetcolor', + DOTTED_BOX: 'dottedbox', + DOWNLOAD: 'download', + DRIVES: 'drives', + DUPLICATE: 'duplicate', + EARTH: 'earth', + EASY: 'easy', + EDB_ARK: 'edbark', + EDB_BADGE: 'edbbadge', + EDB_BART: 'edbbart', + EDB_DASHBOARD: 'edbdashboard', + EDB_DOCS_LOGO_DISC_DARK: 'edbdocslogodiscdark', + EDB_EFM: 'edbefm', + EDB_EPAS: 'edbepas', + EDB_KUBERNETES: 'edbkubernetes', + EDB_LOGO_DISC_DARK: 'edblogodiscdark', + EDB_LOGO_SYMBOL_BADGE: 'edblogosymbolbadge', + EDB_MIGRATION_PORTAL: 'edbmigrationportal', + EDB_MIGRATION_TOOLKIT: 'edbmigrationtoolkit', + EDB_PEM: 'edbpem', EDB_REPLICATE: 'edbreplicate', + EDB_REPLICATION: 'edbreplication', + EDB_SYMBOL: 'edbsymbol', + ELLIPSIS: 'ellipsis', + ENERGY: 'energy', + ENTERPRISE: 'enterprise', + EXPORT: 'export', EXTERNAL_LINK: 'externallink', + FILE: 'file', + GLOBE: 'globe', + GOLANG_COLOR: 'golangcolor', + GUI: 'gui', + HADOOP: 'hadoop', + HAMBURGER: 'hamburger', + HANDSHAKE: 'handshake', + HARD_TO_FIND: 'hardtofind', + HASKELL_COLOR: 'haskellcolor', + HIGH_AVAILABILITY: 'highavailability', + HTML_COLOR: 'htmlcolor', + IDEA_SHARING: 'ideasharing', + IMPROVE: 'improve', + INSTALL: 'install', + INSTANCES: 'instances', + IOS_COLOR: 'ioscolor', + JAVA_COLOR: 'javacolor', + JAVA: 'java', + JAVASCRIPT_COLOR: 'javascriptcolor', + JQUERY_COLOR: 'jquerycolor', + KNIGHT: 'knight', + KOTLIN_COLOR: 'kotlincolor', + KUBERNETES: 'kubernetes', + LAPTOP_CONFIG: 'laptopconfig', + LARAVEL: 'laravel', + LEADER: 'leader', + LEARNING: 
'learning', + LINKEDIN: 'linkedin', + LINUX_COLOR: 'linuxcolor', + MACOS_COLOR: 'macoscolor', + MANAGED_OPERATIONS: 'managedoperations', + MIGRATE: 'migrate', + MISSION: 'mission', + MODULE: 'module', + MOON: 'moon', + MORE_TEAM: 'moreteam', + NAME_TAG: 'nametag', + NETWORK: 'network', + NETWORK2: 'network2', + NEW_WINDOW: 'newwindow', + NEWS: 'news', + NODEJS_COLOR: 'nodejscolor', + NODEJS: 'nodejs', + NOT_FOUND: 'notfound', + OPTIONS: 'options', + PANDAS: 'pandas', + PARTNER: 'partner', + PDF: 'pdf', + PEOPLE: 'people', + PERL_COLOR: 'perlcolor', + PHP_COLOR: 'phpcolor', + PHP: 'php', + PIGGY_BANK: 'piggybank', + PLANNER: 'planner', + POSTGRES_SUPPORT: 'postgressupport', + POSTGRESQL: 'postgresql', + PRESENTATION: 'presentation', + PROCESS_IMPROVEMENT: 'processimprovement', + PULSE: 'pulse', + PYTHON_COLOR: 'pythoncolor', + PYTHON: 'python', + QUOTE_LEFT: 'quoteleft', + QUOTE_RIGHT: 'quoteright', + REACT_NATIVE_COLOR: 'reactnativecolor', + REDHAT_COLOR: 'redhatcolor', + REDHAT: 'redhat', + REMOTE_DBA: 'remotedba', + REPLICATION: 'replication', + ROCKET: 'rocket', + RUBY_COLOR: 'rubycolor', + RUBY: 'ruby', + SEARCH: 'search', + SECURITY_CONFIG: 'securityconfig', + SERVER_ERROR: 'servererror', + SPEED: 'speed', + SQL: 'sql', + STACK: 'stack', + STAR: 'star', + STORE: 'store', + SUBSET: 'subset', + SUN: 'sun', + SUPPORT_PORTAL: 'supportportal', + SUPPORT: 'support', + SUSE_COLOR: 'susecolor', + SWIFT_COLOR: 'swiftcolor', SYNCHRONIZE: 'synchronize', + TEN_YEARS: 'tenyears', TICKET_STAR: 'ticketstar', + TICKET: 'ticket', + TOOLS: 'tools', TRAINING: 'training', + TREEHOUSE: 'treehouse', + TUTORIAL: 'tutorial', + TYPESCRIPT_COLOR: 'typescriptcolor', + UBUNTU_COLOR: 'ubuntucolor', + UNLOCK: 'unlock', + VENN: 'venn', + VUE_JS_COLOR: 'vuejscolor', + WEB_SECURITY: 'websecurity', + WEBINAR: 'webinar', + WINDOWS_COLOR: 'windowscolor', }; diff --git a/src/components/icon/iconType.js b/src/components/icon/iconType.js index fd47cda8572..d4a7c33e320 100644 --- 
a/src/components/icon/iconType.js +++ b/src/components/icon/iconType.js @@ -1,13 +1,189 @@ import React from 'react'; import iconNames from './iconNames'; +import AlertSvg from '../../../static/edb-icons/alert.svg'; +import AngularjsColorSvg from '../../../static/edb-icons/angularjs-color.svg'; +import AnnounceSvg from '../../../static/edb-icons/announce.svg'; +import ArrowLeftSvg from '../../../static/edb-icons/arrow-left.svg'; +import ArrowRightSvg from '../../../static/edb-icons/arrow-right.svg'; import BarmanSvg from '../../../static/edb-icons/barman.svg'; +import BigDataSvg from '../../../static/edb-icons/big-data.svg'; +import BrainCircuitSvg from '../../../static/edb-icons/brain-circuit.svg'; +import BriefcaseSvg from '../../../static/edb-icons/briefcase.svg'; +import BusinessmanSvg from '../../../static/edb-icons/businessman.svg'; +import BusinesswomanSvg from '../../../static/edb-icons/businesswoman.svg'; +import CColorSvg from '../../../static/edb-icons/c-color.svg'; +import CPlusPlusColorSvg from '../../../static/edb-icons/c-plus-plus-color.svg'; +import CSharpColorSvg from '../../../static/edb-icons/c-sharp-color.svg'; +import CSharpSvg from '../../../static/edb-icons/c-sharp.svg'; +import CaseStudySvg from '../../../static/edb-icons/case-study.svg'; +import CentosColorSvg from '../../../static/edb-icons/centos-color.svg'; +import CertificateSvg from '../../../static/edb-icons/certificate.svg'; +import ChangeSvg from '../../../static/edb-icons/change.svg'; +import CheckmarkSvg from '../../../static/edb-icons/checkmark.svg'; +import ChevronDownSvg from '../../../static/edb-icons/chevron-down.svg'; +import ChevronRightSvg from '../../../static/edb-icons/chevron-right.svg'; +import CloseSvg from '../../../static/edb-icons/close.svg'; +import CloudCheckedSvg from '../../../static/edb-icons/cloud-checked.svg'; +import CloudDbSvg from '../../../static/edb-icons/cloud-db.svg'; +import CloudDbaSvg from '../../../static/edb-icons/cloud-dba.svg'; +import 
CloudPrivateSvg from '../../../static/edb-icons/cloud-private.svg'; +import CloudPublicSvg from '../../../static/edb-icons/cloud-public.svg'; +import CodeWritingSvg from '../../../static/edb-icons/code-writing.svg'; import CoffeeSvg from '../../../static/edb-icons/coffee.svg'; +import ConfigManagementSvg from '../../../static/edb-icons/config-management.svg'; +import ConflictSvg from '../../../static/edb-icons/conflict.svg'; +import ConnectSvg from '../../../static/edb-icons/connect.svg'; +import ConsoleSvg from '../../../static/edb-icons/console.svg'; +import ContactSvg from '../../../static/edb-icons/contact.svg'; +import ContainerSvg from '../../../static/edb-icons/container.svg'; +import ControlSvg from '../../../static/edb-icons/control.svg'; +import ConvertSvg from '../../../static/edb-icons/convert.svg'; +import CrownSvg from '../../../static/edb-icons/crown.svg'; +import CubesSvg from '../../../static/edb-icons/cubes.svg'; +import CycleSvg from '../../../static/edb-icons/cycle.svg'; +import DataTransferSvg from '../../../static/edb-icons/data-transfer.svg'; +import DatabaseAdminSvg from '../../../static/edb-icons/database-admin.svg'; +import DatabaseBackupSvg from '../../../static/edb-icons/database-backup.svg'; +import DatabaseSvg from '../../../static/edb-icons/database.svg'; +import DebianColorSvg from '../../../static/edb-icons/debian-color.svg'; +import DeliverLoveSvg from '../../../static/edb-icons/deliver-love.svg'; +import DesignSvg from '../../../static/edb-icons/design.svg'; +import DeveloperSvg from '../../../static/edb-icons/developer.svg'; +import DiplomaSvg from '../../../static/edb-icons/diploma.svg'; +import DjangoColorSvg from '../../../static/edb-icons/django-color.svg'; +import DockerColorSvg from '../../../static/edb-icons/docker-color.svg'; +import DockerContainerSvg from '../../../static/edb-icons/docker-container.svg'; +import DocsSvg from '../../../static/edb-icons/docs.svg'; +import DotNetColorSvg from 
'../../../static/edb-icons/dot-net-color.svg'; +import DottedBoxSvg from '../../../static/edb-icons/dotted-box.svg'; +import DownloadSvg from '../../../static/edb-icons/download.svg'; +import DrivesSvg from '../../../static/edb-icons/drives.svg'; +import DuplicateSvg from '../../../static/edb-icons/duplicate.svg'; +import EarthSvg from '../../../static/edb-icons/earth.svg'; +import EasySvg from '../../../static/edb-icons/easy.svg'; +import EdbArkSvg from '../../../static/edb-icons/edb-ark.svg'; +import EdbBadgeSvg from '../../../static/edb-icons/edb-badge.svg'; +import EdbBartSvg from '../../../static/edb-icons/edb-bart.svg'; +import EdbDashboardSvg from '../../../static/edb-icons/edb-dashboard.svg'; +import EdbDocsLogoDiscDarkSvg from '../../../static/edb-icons/edb-docs-logo-disc-dark.svg'; +import EdbEfmSvg from '../../../static/edb-icons/edb-efm.svg'; +import EdbEpasSvg from '../../../static/edb-icons/edb-epas.svg'; +import EdbKubernetesSvg from '../../../static/edb-icons/edb-kubernetes.svg'; +import EdbLogoDiscDarkSvg from '../../../static/edb-icons/edb-logo-disc-dark.svg'; +import EdbLogoSymbolBadgeSvg from '../../../static/edb-icons/edb-logo-symbol-badge.svg'; +import EdbMigrationPortalSvg from '../../../static/edb-icons/edb-migration-portal.svg'; +import EdbMigrationToolkitSvg from '../../../static/edb-icons/edb-migration-toolkit.svg'; +import EdbPemSvg from '../../../static/edb-icons/edb-pem.svg'; import EdbReplicateSvg from '../../../static/edb-icons/edb-replicate.svg'; +import EdbReplicationSvg from '../../../static/edb-icons/edb-replication.svg'; +import EdbSymbolSvg from '../../../static/edb-icons/edb-symbol.svg'; +import EllipsisSvg from '../../../static/edb-icons/ellipsis.svg'; +import EnergySvg from '../../../static/edb-icons/energy.svg'; +import EnterpriseSvg from '../../../static/edb-icons/enterprise.svg'; +import ExportSvg from '../../../static/edb-icons/export.svg'; import ExternalLinkSvg from '../../../static/edb-icons/external-link.svg'; 
+import FileSvg from '../../../static/edb-icons/file.svg'; +import GlobeSvg from '../../../static/edb-icons/globe.svg'; +import GolangColorSvg from '../../../static/edb-icons/golang-color.svg'; +import GuiSvg from '../../../static/edb-icons/gui.svg'; +import HadoopSvg from '../../../static/edb-icons/hadoop.svg'; +import HamburgerSvg from '../../../static/edb-icons/hamburger.svg'; +import HandshakeSvg from '../../../static/edb-icons/handshake.svg'; +import HardToFindSvg from '../../../static/edb-icons/hard-to-find.svg'; +import HaskellColorSvg from '../../../static/edb-icons/haskell-color.svg'; +import HighAvailabilitySvg from '../../../static/edb-icons/high-availability.svg'; +import HtmlColorSvg from '../../../static/edb-icons/html-color.svg'; +import IdeaSharingSvg from '../../../static/edb-icons/idea-sharing.svg'; +import ImproveSvg from '../../../static/edb-icons/improve.svg'; +import InstallSvg from '../../../static/edb-icons/install.svg'; +import InstancesSvg from '../../../static/edb-icons/instances.svg'; +import IosColorSvg from '../../../static/edb-icons/ios-color.svg'; +import JavaColorSvg from '../../../static/edb-icons/java-color.svg'; +import JavaSvg from '../../../static/edb-icons/java.svg'; +import JavascriptColorSvg from '../../../static/edb-icons/javascript-color.svg'; +import JqueryColorSvg from '../../../static/edb-icons/jquery-color.svg'; +import KnightSvg from '../../../static/edb-icons/knight.svg'; +import KotlinColorSvg from '../../../static/edb-icons/kotlin-color.svg'; +import KubernetesSvg from '../../../static/edb-icons/kubernetes.svg'; +import LaptopConfigSvg from '../../../static/edb-icons/laptop-config.svg'; +import LaravelSvg from '../../../static/edb-icons/laravel.svg'; +import LeaderSvg from '../../../static/edb-icons/leader.svg'; +import LearningSvg from '../../../static/edb-icons/learning.svg'; +import LinkedinSvg from '../../../static/edb-icons/linkedin.svg'; +import LinuxColorSvg from '../../../static/edb-icons/linux-color.svg'; 
+import MacosColorSvg from '../../../static/edb-icons/macos-color.svg'; +import ManagedOperationsSvg from '../../../static/edb-icons/managed-operations.svg'; +import MigrateSvg from '../../../static/edb-icons/migrate.svg'; +import MissionSvg from '../../../static/edb-icons/mission.svg'; +import ModuleSvg from '../../../static/edb-icons/module.svg'; +import MoonSvg from '../../../static/edb-icons/moon.svg'; +import MoreTeamSvg from '../../../static/edb-icons/more-team.svg'; +import NameTagSvg from '../../../static/edb-icons/name-tag.svg'; +import NetworkSvg from '../../../static/edb-icons/network.svg'; +import Network2Svg from '../../../static/edb-icons/network2.svg'; +import NewWindowSvg from '../../../static/edb-icons/new-window.svg'; +import NewsSvg from '../../../static/edb-icons/news.svg'; +import NodejsColorSvg from '../../../static/edb-icons/nodejs-color.svg'; +import NodejsSvg from '../../../static/edb-icons/nodejs.svg'; +import NotFoundSvg from '../../../static/edb-icons/not-found.svg'; +import OptionsSvg from '../../../static/edb-icons/options.svg'; +import PandasSvg from '../../../static/edb-icons/pandas.svg'; +import PartnerSvg from '../../../static/edb-icons/partner.svg'; +import PdfSvg from '../../../static/edb-icons/pdf.svg'; +import PeopleSvg from '../../../static/edb-icons/people.svg'; +import PerlColorSvg from '../../../static/edb-icons/perl-color.svg'; +import PhpColorSvg from '../../../static/edb-icons/php-color.svg'; +import PhpSvg from '../../../static/edb-icons/php.svg'; +import PiggyBankSvg from '../../../static/edb-icons/piggy-bank.svg'; +import PlannerSvg from '../../../static/edb-icons/planner.svg'; +import PostgresSupportSvg from '../../../static/edb-icons/postgres-support.svg'; +import PostgresqlSvg from '../../../static/edb-icons/postgresql.svg'; +import PresentationSvg from '../../../static/edb-icons/presentation.svg'; +import ProcessImprovementSvg from '../../../static/edb-icons/process-improvement.svg'; +import PulseSvg from 
'../../../static/edb-icons/pulse.svg'; +import PythonColorSvg from '../../../static/edb-icons/python-color.svg'; +import PythonSvg from '../../../static/edb-icons/python.svg'; +import QuoteLeftSvg from '../../../static/edb-icons/quote-left.svg'; +import QuoteRightSvg from '../../../static/edb-icons/quote-right.svg'; +import ReactNativeColorSvg from '../../../static/edb-icons/react-native-color.svg'; +import RedhatColorSvg from '../../../static/edb-icons/redhat-color.svg'; +import RedhatSvg from '../../../static/edb-icons/redhat.svg'; +import RemoteDbaSvg from '../../../static/edb-icons/remote-dba.svg'; +import ReplicationSvg from '../../../static/edb-icons/replication.svg'; +import RocketSvg from '../../../static/edb-icons/rocket.svg'; +import RubyColorSvg from '../../../static/edb-icons/ruby-color.svg'; +import RubySvg from '../../../static/edb-icons/ruby.svg'; +import SearchSvg from '../../../static/edb-icons/search.svg'; +import SecurityConfigSvg from '../../../static/edb-icons/security-config.svg'; +import ServerErrorSvg from '../../../static/edb-icons/server-error.svg'; +import SpeedSvg from '../../../static/edb-icons/speed.svg'; +import SqlSvg from '../../../static/edb-icons/sql.svg'; +import StackSvg from '../../../static/edb-icons/stack.svg'; +import StarSvg from '../../../static/edb-icons/star.svg'; +import StoreSvg from '../../../static/edb-icons/store.svg'; +import SubsetSvg from '../../../static/edb-icons/subset.svg'; +import SunSvg from '../../../static/edb-icons/sun.svg'; +import SupportPortalSvg from '../../../static/edb-icons/support-portal.svg'; +import SupportSvg from '../../../static/edb-icons/support.svg'; +import SuseColorSvg from '../../../static/edb-icons/suse-color.svg'; +import SwiftColorSvg from '../../../static/edb-icons/swift-color.svg'; import SynchronizeSvg from '../../../static/edb-icons/synchronize.svg'; +import TenYearsSvg from '../../../static/edb-icons/ten-years.svg'; import TicketStarSvg from 
'../../../static/edb-icons/ticket-star.svg'; +import TicketSvg from '../../../static/edb-icons/ticket.svg'; +import ToolsSvg from '../../../static/edb-icons/tools.svg'; import TrainingSvg from '../../../static/edb-icons/training.svg'; +import TreehouseSvg from '../../../static/edb-icons/treehouse.svg'; +import TutorialSvg from '../../../static/edb-icons/tutorial.svg'; +import TypescriptColorSvg from '../../../static/edb-icons/typescript-color.svg'; +import UbuntuColorSvg from '../../../static/edb-icons/ubuntu-color.svg'; +import UnlockSvg from '../../../static/edb-icons/unlock.svg'; +import VennSvg from '../../../static/edb-icons/venn.svg'; +import VueJsColorSvg from '../../../static/edb-icons/vue-js-color.svg'; +import WebSecuritySvg from '../../../static/edb-icons/web-security.svg'; +import WebinarSvg from '../../../static/edb-icons/webinar.svg'; +import WindowsColorSvg from '../../../static/edb-icons/windows-color.svg'; function formatIconName(name) { return name && name.replace(/ /g, '').toLowerCase(); @@ -15,20 +191,372 @@ function formatIconName(name) { export default function IconType({ iconName, ...rest }) { switch (formatIconName(iconName)) { + case iconNames.ALERT: + return ; + case iconNames.ANGULARJS_COLOR: + return ; + case iconNames.ANNOUNCE: + return ; + case iconNames.ARROW_LEFT: + return ; + case iconNames.ARROW_RIGHT: + return ; case iconNames.BARMAN: return ; + case iconNames.BIG_DATA: + return ; + case iconNames.BRAIN_CIRCUIT: + return ; + case iconNames.BRIEFCASE: + return ; + case iconNames.BUSINESSMAN: + return ; + case iconNames.BUSINESSWOMAN: + return ; + case iconNames.C_COLOR: + return ; + case iconNames.C_PLUS_PLUS_COLOR: + return ; + case iconNames.C_SHARP_COLOR: + return ; + case iconNames.C_SHARP: + return ; + case iconNames.CASE_STUDY: + return ; + case iconNames.CENTOS_COLOR: + return ; + case iconNames.CERTIFICATE: + return ; + case iconNames.CHANGE: + return ; + case iconNames.CHECKMARK: + return ; + case iconNames.CHEVRON_DOWN: + 
return ; + case iconNames.CHEVRON_RIGHT: + return ; + case iconNames.CLOSE: + return ; + case iconNames.CLOUD_CHECKED: + return ; + case iconNames.CLOUD_DB: + return ; + case iconNames.CLOUD_DBA: + return ; + case iconNames.CLOUD_PRIVATE: + return ; + case iconNames.CLOUD_PUBLIC: + return ; + case iconNames.CODE_WRITING: + return ; case iconNames.COFFEE: return ; + case iconNames.CONFIG_MANAGEMENT: + return ; + case iconNames.CONFLICT: + return ; + case iconNames.CONNECT: + return ; + case iconNames.CONSOLE: + return ; + case iconNames.CONTACT: + return ; + case iconNames.CONTAINER: + return ; + case iconNames.CONTROL: + return ; + case iconNames.CONVERT: + return ; + case iconNames.CROWN: + return ; + case iconNames.CUBES: + return ; + case iconNames.CYCLE: + return ; + case iconNames.DATA_TRANSFER: + return ; + case iconNames.DATABASE_ADMIN: + return ; + case iconNames.DATABASE_BACKUP: + return ; + case iconNames.DATABASE: + return ; + case iconNames.DEBIAN_COLOR: + return ; + case iconNames.DELIVER_LOVE: + return ; + case iconNames.DESIGN: + return ; + case iconNames.DEVELOPER: + return ; + case iconNames.DIPLOMA: + return ; + case iconNames.DJANGO_COLOR: + return ; + case iconNames.DOCKER_COLOR: + return ; + case iconNames.DOCKER_CONTAINER: + return ; + case iconNames.DOCS: + return ; + case iconNames.DOT_NET_COLOR: + return ; + case iconNames.DOTTED_BOX: + return ; + case iconNames.DOWNLOAD: + return ; + case iconNames.DRIVES: + return ; + case iconNames.DUPLICATE: + return ; + case iconNames.EARTH: + return ; + case iconNames.EASY: + return ; + case iconNames.EDB_ARK: + return ; + case iconNames.EDB_BADGE: + return ; + case iconNames.EDB_BART: + return ; + case iconNames.EDB_DASHBOARD: + return ; + case iconNames.EDB_DOCS_LOGO_DISC_DARK: + return ; + case iconNames.EDB_EFM: + return ; + case iconNames.EDB_EPAS: + return ; + case iconNames.EDB_KUBERNETES: + return ; + case iconNames.EDB_LOGO_DISC_DARK: + return ; + case iconNames.EDB_LOGO_SYMBOL_BADGE: + 
return ; + case iconNames.EDB_MIGRATION_PORTAL: + return ; + case iconNames.EDB_MIGRATION_TOOLKIT: + return ; + case iconNames.EDB_PEM: + return ; case iconNames.EDB_REPLICATE: return ; + case iconNames.EDB_REPLICATION: + return ; + case iconNames.EDB_SYMBOL: + return ; + case iconNames.ELLIPSIS: + return ; + case iconNames.ENERGY: + return ; + case iconNames.ENTERPRISE: + return ; + case iconNames.EXPORT: + return ; case iconNames.EXTERNAL_LINK: return ; + case iconNames.FILE: + return ; + case iconNames.GLOBE: + return ; + case iconNames.GOLANG_COLOR: + return ; + case iconNames.GUI: + return ; + case iconNames.HADOOP: + return ; + case iconNames.HAMBURGER: + return ; + case iconNames.HANDSHAKE: + return ; + case iconNames.HARD_TO_FIND: + return ; + case iconNames.HASKELL_COLOR: + return ; + case iconNames.HIGH_AVAILABILITY: + return ; + case iconNames.HTML_COLOR: + return ; + case iconNames.IDEA_SHARING: + return ; + case iconNames.IMPROVE: + return ; + case iconNames.INSTALL: + return ; + case iconNames.INSTANCES: + return ; + case iconNames.IOS_COLOR: + return ; + case iconNames.JAVA_COLOR: + return ; + case iconNames.JAVA: + return ; + case iconNames.JAVASCRIPT_COLOR: + return ; + case iconNames.JQUERY_COLOR: + return ; + case iconNames.KNIGHT: + return ; + case iconNames.KOTLIN_COLOR: + return ; + case iconNames.KUBERNETES: + return ; + case iconNames.LAPTOP_CONFIG: + return ; + case iconNames.LARAVEL: + return ; + case iconNames.LEADER: + return ; + case iconNames.LEARNING: + return ; + case iconNames.LINKEDIN: + return ; + case iconNames.LINUX_COLOR: + return ; + case iconNames.MACOS_COLOR: + return ; + case iconNames.MANAGED_OPERATIONS: + return ; + case iconNames.MIGRATE: + return ; + case iconNames.MISSION: + return ; + case iconNames.MODULE: + return ; + case iconNames.MOON: + return ; + case iconNames.MORE_TEAM: + return ; + case iconNames.NAME_TAG: + return ; + case iconNames.NETWORK: + return ; + case iconNames.NETWORK2: + return ; + case 
iconNames.NEW_WINDOW: + return ; + case iconNames.NEWS: + return ; + case iconNames.NODEJS_COLOR: + return ; + case iconNames.NODEJS: + return ; + case iconNames.NOT_FOUND: + return ; + case iconNames.OPTIONS: + return ; + case iconNames.PANDAS: + return ; + case iconNames.PARTNER: + return ; + case iconNames.PDF: + return ; + case iconNames.PEOPLE: + return ; + case iconNames.PERL_COLOR: + return ; + case iconNames.PHP_COLOR: + return ; + case iconNames.PHP: + return ; + case iconNames.PIGGY_BANK: + return ; + case iconNames.PLANNER: + return ; + case iconNames.POSTGRES_SUPPORT: + return ; + case iconNames.POSTGRESQL: + return ; + case iconNames.PRESENTATION: + return ; + case iconNames.PROCESS_IMPROVEMENT: + return ; + case iconNames.PULSE: + return ; + case iconNames.PYTHON_COLOR: + return ; + case iconNames.PYTHON: + return ; + case iconNames.QUOTE_LEFT: + return ; + case iconNames.QUOTE_RIGHT: + return ; + case iconNames.REACT_NATIVE_COLOR: + return ; + case iconNames.REDHAT_COLOR: + return ; + case iconNames.REDHAT: + return ; + case iconNames.REMOTE_DBA: + return ; + case iconNames.REPLICATION: + return ; + case iconNames.ROCKET: + return ; + case iconNames.RUBY_COLOR: + return ; + case iconNames.RUBY: + return ; + case iconNames.SEARCH: + return ; + case iconNames.SECURITY_CONFIG: + return ; + case iconNames.SERVER_ERROR: + return ; + case iconNames.SPEED: + return ; + case iconNames.SQL: + return ; + case iconNames.STACK: + return ; + case iconNames.STAR: + return ; + case iconNames.STORE: + return ; + case iconNames.SUBSET: + return ; + case iconNames.SUN: + return ; + case iconNames.SUPPORT_PORTAL: + return ; + case iconNames.SUPPORT: + return ; + case iconNames.SUSE_COLOR: + return ; + case iconNames.SWIFT_COLOR: + return ; case iconNames.SYNCHRONIZE: return ; + case iconNames.TEN_YEARS: + return ; case iconNames.TICKET_STAR: return ; + case iconNames.TICKET: + return ; + case iconNames.TOOLS: + return ; case iconNames.TRAINING: return ; + case 
iconNames.TREEHOUSE: + return ; + case iconNames.TUTORIAL: + return ; + case iconNames.TYPESCRIPT_COLOR: + return ; + case iconNames.UBUNTU_COLOR: + return ; + case iconNames.UNLOCK: + return ; + case iconNames.VENN: + return ; + case iconNames.VUE_JS_COLOR: + return ; + case iconNames.WEB_SECURITY: + return ; + case iconNames.WEBINAR: + return ; + case iconNames.WINDOWS_COLOR: + return ; default: return null; } From 30e6d6c48a5fa34d3392e4e5cad779e8cb89cd7e Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Thu, 4 Mar 2021 11:14:44 +0000 Subject: [PATCH 09/28] New PDFs generated by Github Actions Former-commit-id: 6fa94e78dff72e00a7300b204d42d1559ec9823d From 6a25b02c40147baed2f90530d870ceb8ed9af2f4 Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Thu, 4 Mar 2021 11:30:28 +0000 Subject: [PATCH 10/28] New PDFs generated by Github Actions Former-commit-id: 2ccab2434d7472e63ce3c8103f9118a83cf37236 From ae3d3f71e68c00fd258a5ba191e96bc1841a91fa Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Thu, 4 Mar 2021 11:14:31 -0500 Subject: [PATCH 11/28] Wrap title in double quotes to prevent issues with apostrophes in titles; add more logging output to pdf script Former-commit-id: 00ce3d795698a6547f8cd57e62668aad14bf80c5 --- scripts/pdf/generate_pdf.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/scripts/pdf/generate_pdf.py b/scripts/pdf/generate_pdf.py index bb399990732..c599d1190e9 100644 --- a/scripts/pdf/generate_pdf.py +++ b/scripts/pdf/generate_pdf.py @@ -6,6 +6,12 @@ basePath = pathlib.Path(__file__).parent.absolute() +ANSI_STOP = '\033[0m' +ANSI_BLUE = '\033[34m' +ANSI_GREEN = '\033[32m' +ANSI_YELLOW = '\033[33m' +ANSI_RED = '\033[31m' + # magic snippet for inline repl # import code; code.interact(local=dict(globals(), **locals())) @@ -102,6 +108,8 @@ def main(): guide + '_' if guide else '' ) + print(ANSI_BLUE + 'building {}'.format(pdfFilePath) + ANSI_STOP) + if not os.path.exists(dirName): raise Exception('directory does not 
exist') @@ -159,6 +167,7 @@ def main(): title = getTitle(dirName) or product + print('generating docs html') os.system( "pandoc {0} " \ "-f gfm " \ @@ -176,10 +185,11 @@ def main(): if html: os.system("open " + htmlFilePath) else: + print('generating cover page') os.system( "sed " \ - "-e 's/\[PRODUCT\]/{1}/' " \ - "-e 's/\[VERSION\]/{2}/' " \ + "-e \"s/\[PRODUCT\]/{1}/\" " \ + "-e \"s/\[VERSION\]/{2}/\" " \ "scripts/pdf/cover.html " \ "> {0}" \ "".format(coverFilePath, title, version) @@ -196,10 +206,11 @@ def main(): "--footer-font-size 8 " \ "--footer-spacing 7 ".format(datetime.datetime.now().year) + print('converting html to pdf') os.system( "wkhtmltopdf " \ "--log-level error " \ - "--title '{3}' " \ + "--title \"{3}\" " \ "--margin-top 15mm " \ "--margin-bottom 15mm " \ "{0} " \ From 606218bca9760b397714ca34bf5d57d465ecb5ec Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Thu, 4 Mar 2021 11:36:09 -0500 Subject: [PATCH 12/28] Fail build-all-pdfs if a single pdf fails to build Former-commit-id: 7c302c4679ff25e0da4f1d6630c01be9bc5912d9 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 651af77e6c9..a66e74f78a5 100644 --- a/package.json +++ b/package.json @@ -16,7 +16,7 @@ "serve-build": "gatsby serve --prefix-paths", "update-icons": "git submodule update --init --remote && node scripts/createIconTypes.js && node scripts/createIconNames.js", "build-pdf": "python3 scripts/pdf/generate_pdf.py", - "build-all-pdfs": "for i in product_docs/docs/**/*/ ; do echo \"$i\"; python3 scripts/pdf/generate_pdf.py ${i%}; done", + "build-all-pdfs": "for i in product_docs/docs/**/*/ ; do echo \"$i\"; python3 scripts/pdf/generate_pdf.py ${i%} || exit 1; done", "fix-mtimes": "python3 scripts/source/git-restore-mtime.py", "count": "find product_docs/docs/ advocacy_docs/ external_sources/ -name '*.mdx' | wc -l", "heroku-postbuild": "gatsby build", From dd03a0407120fe64bdaaf00ed6bec3bf7966b6d2 Mon Sep 17 00:00:00 2001 
From: Evan Barger Date: Thu, 4 Mar 2021 12:07:20 -0500 Subject: [PATCH 13/28] Fix some image paths in pem online help for 8.0 and 8.0.1 Former-commit-id: 833fe16f57bbe09e329398abd70353ad30dc19c5 --- .../03_toc_pem_client/01_pem_browser_window.mdx | 2 +- .../03_toc_pem_client/02_pem_toolbar.mdx | 2 +- .../03_toc_pem_client/03_pem_menu_bar.mdx | 10 +++++----- .../03_toc_pem_client/01_pem_browser_window.mdx | 2 +- .../03_toc_pem_client/02_pem_toolbar.mdx | 2 +- .../03_toc_pem_client/03_pem_menu_bar.mdx | 10 +++++----- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx index 5d7b6ba2697..3fad162c3ea 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx @@ -6,7 +6,7 @@ title: "PEM Main Browser Window" The PEM client features a menu bar and a window divided into two panes: the `Browser` tree control in the left pane, and a tabbed browser in the right pane. -![PEM browser window](/../images/pem_browser_window.png) +![PEM browser window](../images/pem_browser_window.png) [Menus](03_pem_menu_bar/#pem_menu_bar) displayed across the top of the browser window provide quick, context-sensitive access to PEM features and functionality. 
diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx index f644511884d..1270bc87da5 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx @@ -6,7 +6,7 @@ title: "Browser Toolbar" The browser toolbar provides shortcut buttons for frequently used features like View Data and the Query Tool which are most frequently used in PEM. This toolbar is visible on the Browser panel. Buttons get enabled/disabled based on the selected browser node. -![Browser Toolbar](/../images/pem_toolbar.png) +![Browser Toolbar](../images/pem_toolbar.png) - Use the [Query Tool](05_keyboard_shortcuts/#query-tool) button to open the Query Tool in the current database context. - Use the [View Data](../08_toc_pem_developer_tools/04_editgrid/#editgrid) button to view/edit the data stored in a selected table. diff --git a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx index 10d7b0aa603..3c900b49320 100644 --- a/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx +++ b/product_docs/docs/pem/8.0.1/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx @@ -14,7 +14,7 @@ Context-sensitive menus across the top of the PEM web interface allow you to cus **The File Menu** -![PEM File menu](/../images/pem_file_menu.png) +![PEM File menu](../images/pem_file_menu.png) Use the `File` menu to access the following options: @@ -27,7 +27,7 @@ Use the `File` menu to access the following options: **The Object Menu** -![PEM Object menu](/../images/pem_object_menu.png) +![PEM Object menu](../images/pem_object_menu.png) The `Object` menu is context-sensitive. 
Use the `Object` menu to access the following options: @@ -55,7 +55,7 @@ The `Object` menu is context-sensitive. Use the `Object` menu to access the foll **The Management Menu** -![PEM Management menu](/../images/pem_management_menu.png) +![PEM Management menu](../images/pem_management_menu.png) Use the `Management` menu to access the following PEM features: @@ -78,7 +78,7 @@ Use the `Management` menu to access the following PEM features: **The Dashboards Menu** -![PEM Dashboards menu](/../images/pem_dashboards_menu.png) +![PEM Dashboards menu](../images/pem_dashboards_menu.png) The `Dashboards` menu is context-sensitive; use the `Dashboards` menu to access the following options: @@ -102,7 +102,7 @@ The `Dashboards` menu is context-sensitive; use the `Dashboards` menu to access **The Tools Menu** -![PEM Tools menu](/../images/pem_tool_menu.png) +![PEM Tools menu](../images/pem_tool_menu.png) Use the `Tools` menu to access the following options: diff --git a/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx b/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx index 851aa0ff97e..829563481d9 100644 --- a/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx +++ b/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/01_pem_browser_window.mdx @@ -10,7 +10,7 @@ legacyRedirectsGenerated: The PEM client features a menu bar and a window divided into two panes: the `Browser` tree control in the left pane, and a tabbed browser in the right pane. -![PEM browser window](/../images/pem_browser_window.png) +![PEM browser window](../images/pem_browser_window.png) [Menus](03_pem_menu_bar/#pem_menu_bar) displayed across the top of the browser window provide quick, context-sensitive access to PEM features and functionality. 
diff --git a/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx b/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx index 8374e063156..2af46c2594d 100644 --- a/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx +++ b/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/02_pem_toolbar.mdx @@ -10,7 +10,7 @@ legacyRedirectsGenerated: The browser toolbar provides shortcut buttons for frequently used features like View Data and the Query Tool which are most frequently used in PEM. This toolbar is visible on the Browser panel. Buttons get enabled/disabled based on the selected browser node. -![Browser Toolbar](/../images/pem_toolbar.png) +![Browser Toolbar](../images/pem_toolbar.png) - Use the [Query Tool](05_keyboard_shortcuts/#query-tool) button to open the Query Tool in the current database context. - Use the [View Data](../08_toc_pem_developer_tools/04_editgrid/#editgrid) button to view/edit the data stored in a selected table. diff --git a/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx b/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx index 7506bc4341d..73fc44cf442 100644 --- a/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx +++ b/product_docs/docs/pem/8.0/pem_online_help/03_toc_pem_client/03_pem_menu_bar.mdx @@ -18,7 +18,7 @@ Context-sensitive menus across the top of the PEM web interface allow you to cus **The File Menu** -![PEM File menu](/../images/pem_file_menu.png) +![PEM File menu](../images/pem_file_menu.png) Use the `File` menu to access the following options: @@ -31,7 +31,7 @@ Use the `File` menu to access the following options: **The Object Menu** -![PEM Object menu](/../images/pem_object_menu.png) +![PEM Object menu](../images/pem_object_menu.png) The `Object` menu is context-sensitive. 
Use the `Object` menu to access the following options: @@ -59,7 +59,7 @@ The `Object` menu is context-sensitive. Use the `Object` menu to access the foll **The Management Menu** -![PEM Management menu](/../images/pem_management_menu.png) +![PEM Management menu](../images/pem_management_menu.png) Use the `Management` menu to access the following PEM features: @@ -82,7 +82,7 @@ Use the `Management` menu to access the following PEM features: **The Dashboards Menu** -![PEM Dashboards menu](/../images/pem_dashboards_menu.png) +![PEM Dashboards menu](../images/pem_dashboards_menu.png) The `Dashboards` menu is context-sensitive; use the `Dashboards` menu to access the following options: @@ -106,7 +106,7 @@ The `Dashboards` menu is context-sensitive; use the `Dashboards` menu to access **The Tools Menu** -![PEM Tools menu](/../images/pem_tool_menu.png) +![PEM Tools menu](../images/pem_tool_menu.png) Use the `Tools` menu to access the following options: From f7123a97c68f89671275ef1f9f756de33eef7d9f Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Thu, 4 Mar 2021 17:49:16 +0000 Subject: [PATCH 14/28] New PDFs generated by Github Actions Former-commit-id: 8118abf50dc79fa23b0d3660ef926d12d15e4432 From bd9cb4ccc537b5a2c7490b603584733aa53d53d8 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Thu, 4 Mar 2021 12:52:55 -0500 Subject: [PATCH 15/28] Change pdf workflow to do shallow clone Former-commit-id: 60761a0f7513d560514a45b846e33d7f1df281f2 --- .github/workflows/update-pdfs-on-develop.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/update-pdfs-on-develop.yml b/.github/workflows/update-pdfs-on-develop.yml index c5f887b0205..2a62393007f 100644 --- a/.github/workflows/update-pdfs-on-develop.yml +++ b/.github/workflows/update-pdfs-on-develop.yml @@ -13,7 +13,6 @@ jobs: - uses: actions/checkout@v2 with: ref: develop - fetch-depth: 0 # fetch whole repo so git-restore-mtime can work ssh-key: ${{ secrets.ADMIN_SECRET_SSH_KEY }} - name: Update submodules run: git 
submodule update --init --remote From bbeeba77d0cee658b2c8b4645119539c18e27313 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Thu, 4 Mar 2021 16:10:25 -0500 Subject: [PATCH 16/28] Update dev-sources note Co-authored-by: Josh Heyer <63653723+josh-heyer@users.noreply.github.com> Former-commit-id: d20836b13ada9465ba1da2936d865a5b803c4d70 --- gatsby-config.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gatsby-config.js b/gatsby-config.js index 36c2d8cdcdf..200526af3d2 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -78,7 +78,7 @@ const externalSourcePlugins = () => { `${ANSI_BLUE}###### Sourcing from ${sourceFilename} #######${ANSI_STOP}`, ); console.log( - `${ANSI_GREEN}Note that ${sourceFilename} is no longer strictly required - the full set of docs will be loaded in its absence.${ANSI_STOP}`, + `${ANSI_GREEN}Note: ${sourceFilename} is no longer required; delete it to load the full set of docs.${ANSI_STOP}`, ); sources = JSON.parse(gracefulFs.readFileSync(sourceFilename)); } From f8c31299644302c656d68921fd15c10a0d14330f Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Thu, 4 Mar 2021 16:30:45 -0500 Subject: [PATCH 17/28] Memoize the mdx components passed into layout, to prevent re-renderings if props don't actually change, specifically katacodaPanelData Former-commit-id: 291f5706f67552e0ea396ec85f49ecdc58bc7ccf --- src/components/code-block.js | 8 ++-- src/components/layout.js | 79 ++++++++++++++++++------------------ 2 files changed, 42 insertions(+), 45 deletions(-) diff --git a/src/components/code-block.js b/src/components/code-block.js index f9ee02cbb46..05bb5de83ae 100644 --- a/src/components/code-block.js +++ b/src/components/code-block.js @@ -130,7 +130,7 @@ const OutputPre = ({ content }) => ( ); -const CodeBlock = ({ children, katacodaPanelData, ...otherProps }) => { +const CodeBlock = ({ children, codeLanguages, ...otherProps }) => { const childIsComponent = !!children.props; // true in normal usage, false if raw
 tags are used
 
   const [codeContent, outputContent] = childIsComponent
@@ -140,10 +140,8 @@ const CodeBlock = ({ children, katacodaPanelData, ...otherProps }) => {
     ? (children.props.className || '').replace('language-', '')
     : 'text';
 
-  const execLanguages = katacodaPanelData
-    ? ['shell'].concat(
-        katacodaPanelData.codelanguages?.split(',')?.map((l) => l.trim()),
-      )
+  const execLanguages = codeLanguages
+    ? ['shell'].concat(codeLanguages?.split(',')?.map((l) => l.trim()))
     : [];
 
   if (codeContent.length > 0) {
diff --git a/src/components/layout.js b/src/components/layout.js
index c81db7a6461..4e81b16df62 100644
--- a/src/components/layout.js
+++ b/src/components/layout.js
@@ -1,4 +1,4 @@
-import React, { useState, useEffect } from 'react';
+import React, { useState, useEffect, useMemo } from 'react';
 import { Helmet } from 'react-helmet';
 import useSiteMetadata from '../hooks/use-sitemetadata';
 import {
@@ -24,8 +24,6 @@ const Layout = ({
   const { baseUrl, imageUrl, title: siteTitle } = useSiteMetadata();
   const meta = pageMeta || {};
   const url = meta.path ? baseUrl + meta.path : baseUrl;
-  // console.log(url);
-
   const title = meta.title ? `EDB Docs - ${meta.title}` : siteTitle;
 
   const [dark, setDark] = useState(false);
@@ -45,6 +43,43 @@ const Layout = ({
     }
   }, [setDark]);
 
+  const mdxComponents = useMemo(
+    () => ({
+      a: ({ href, ...rest }) => (
+        
+      ),
+      table: (props) => (
+        
+ + + ), + pre: (props) => ( + + ), + h2: (props) =>

, // eslint-disable-line jsx-a11y/heading-has-content + h3: (props) =>

, // eslint-disable-line jsx-a11y/heading-has-content + img: (props) => , // eslint-disable-line jsx-a11y/alt-text + blockquote: (props) => ( +
+ ), + KatacodaPanel: () => ( + + ), + KatacodaPageLink, + Icon, + StubCards, + }), + [katacodaPanelData, meta], + ); + return ( - ( - - ), - table: (props) => ( -
-

- - ), - pre: (props) => ( - - ), - h2: (props) =>

, // eslint-disable-line jsx-a11y/heading-has-content - h3: (props) =>

, // eslint-disable-line jsx-a11y/heading-has-content - img: (props) => , // eslint-disable-line jsx-a11y/alt-text - blockquote: (props) => ( -
- ), - KatacodaPanel: (props) => ( - - ), - KatacodaPageLink, - Icon, - StubCards, - }} - > - {children} - + {children} ); From 6a9e08b731eaa296ba3ad9e4ea08ff125aba24b0 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Thu, 4 Mar 2021 16:57:05 -0500 Subject: [PATCH 18/28] Safe navigation for CodeBlock codeLanguages prop in layout Former-commit-id: 94b289421987ad725bcc455686fb938ef5dd222c --- src/components/layout.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/components/layout.js b/src/components/layout.js index 4e81b16df62..d4dba5d33b4 100644 --- a/src/components/layout.js +++ b/src/components/layout.js @@ -59,7 +59,10 @@ const Layout = ({ ), pre: (props) => ( - + ), h2: (props) =>

, // eslint-disable-line jsx-a11y/heading-has-content h3: (props) =>

, // eslint-disable-line jsx-a11y/heading-has-content From 0cabed7c1e793ef99eed67ab7ad6ea98e4bf1880 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Fri, 5 Mar 2021 15:38:08 -0500 Subject: [PATCH 19/28] Extract algolia indexing out of gatsby-config, upgrade algolia plugin Former-commit-id: 4eb5f4d31c797ac36d52935dbc4ca957d0e6db53 --- gatsby-config.js | 218 +++--------------------------- package.json | 2 +- src/constants/algolia-indexing.js | 177 ++++++++++++++++++++++++ yarn.lock | 8 +- 4 files changed, 204 insertions(+), 201 deletions(-) create mode 100644 src/constants/algolia-indexing.js diff --git a/gatsby-config.js b/gatsby-config.js index 200526af3d2..6182a8f8811 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -1,9 +1,10 @@ require('dotenv').config({ path: `.env.${process.env.NODE_ENV}`, }); -const utf8Truncate = require('truncate-utf8-bytes'); const gracefulFs = require('graceful-fs'); +const algoliaTransformer = require('./src/constants/algolia-indexing.js'); + const ANSI_BLUE = '\033[34m'; const ANSI_GREEN = '\033[32m'; const ANSI_STOP = '\033[0m'; @@ -124,173 +125,7 @@ const indexQuery = ` } `; -const transformNodeForAlgolia = (node) => { - let newNode = node; - newNode['title'] = node.frontmatter.title; - newNode['path'] = node.fields.path; - newNode['type'] = 'guide'; - if (node.frontmatter.product) { - newNode['product'] = node.frontmatter.product; - } - if (node.frontmatter.platform) { - newNode['platform'] = node.frontmatter.platform; - } - - if (node.fields.docType == 'doc') { - newNode['product'] = node.fields.product; - newNode['version'] = node.fields.version; - newNode['productVersion'] = - node.fields.product + ' > ' + node.fields.version; - newNode['type'] = 'doc'; - } - - delete newNode['frontmatter']; - delete newNode['fields']; - return newNode; -}; - -const makePathDictionary = (nodes) => { - let dictionary = {}; - for (let node of nodes) { - dictionary[node.fields.path] = node.frontmatter.title; - } - return dictionary; -}; - 
-const makeBreadcrumbs = (node, dictionary, advocacy = false) => { - let depth = advocacy ? 3 : 4; - let trail = ''; - const path = node.fields.path; - const pathPieces = path.split('/'); - for (let i = depth; i < pathPieces.length; i++) { - let parentPath = pathPieces.slice(0, i).join('/'); - trail += dictionary[parentPath] + ' / '; - } - return trail; -}; - -const addBreadcrumbsToNodes = (nodes) => { - const pathDictionary = makePathDictionary(nodes); - let newNodes = []; - for (let node of nodes) { - let newNode = node; - const advocacy = !node.fields.product; - newNode['breadcrumb'] = makeBreadcrumbs(node, pathDictionary, advocacy); - newNodes.push(newNode); - } - return newNodes; -}; - -const mdxTreeToSearchNodes = (rootNode) => { - rootNode.depth = 0; - const stack = [rootNode]; - const searchNodes = []; - const initialSearchNode = { text: '', heading: '' }; - - let parseState = { - attribute: 'text', - nextAttribute: null, - transitionDepth: null, - }; - const nextParseStateIfDepth = (depth) => { - if (!parseState.transitionDepth || depth > parseState.transitionDepth) - return; - parseState = { - attribute: parseState.nextAttribute, - nextAttribute: null, - transitionDepth: null, - }; - }; - const setHeadingParseState = (depth) => { - parseState = { - attribute: 'heading', - nextAttribute: 'text', - transitionDepth: depth, - }; - }; - - let searchNode = { ...initialSearchNode }; - let node = null; - while (stack.length > 0) { - node = stack.pop(); - nextParseStateIfDepth(node.depth); - - if (['import', 'export'].includes(node.type)) { - // skip these nodes - continue; - } - - if (node.type === 'heading') { - // break on headings - if (searchNode.text.length > 0) { - searchNodes.push(searchNode); - } - searchNode = { ...initialSearchNode }; - setHeadingParseState(node.depth); - } - - if (node.value && !['html', 'jsx'].includes(node.type)) { - searchNode[parseState.attribute] += ` ${node.value}`; - } else { - (node.children || []) - .slice() - .reverse() - 
.forEach((child) => { - child.depth = node.depth + 1; - stack.push(child); - }); - } - } - if (searchNode.text.length > '') { - searchNodes.push(searchNode); - } - - return searchNodes; -}; - -const trimSpaces = (str) => { - return str.replace(/\s+/g, ' ').trim(); -}; - -const splitNodeContent = (nodes) => { - const result = []; - for (const node of nodes) { - // skip indexing this content for now - if ( - node.path.includes('/postgresql_journey/') || - node.path.includes('/playground/') - ) { - console.log(`skipped indexing ${node.path}`); - continue; - } - - const searchNodes = mdxTreeToSearchNodes(node.mdxAST); - - searchNodes.forEach((searchNode, i) => { - let newNode = { ...node }; - delete newNode['mdxAST']; - - newNode.id = `${newNode.path}-${i + 1}`; - newNode.heading = trimSpaces(searchNode.heading); - newNode.excerpt = utf8Truncate( - trimSpaces(`${searchNode.heading}: ${searchNode.text}`), - 8000, - ); - if (searchNode.heading.length > 0) { - const anchor = newNode.heading - .split(' ') - .join('-') - .toLowerCase() - .replace('/', ''); - newNode.path = `${newNode.path}#${anchor}`; - } - - result.push(newNode); - }); - } - return result; -}; - +/********** Gatsby config *********/ const netlifyHeaders = () => { if (isProduction) return {}; @@ -299,7 +134,6 @@ const netlifyHeaders = () => { }; }; -/********** Gatsby config *********/ module.exports = { flags: { PRESERVE_WEBPACK_CACHE: true, @@ -438,32 +272,24 @@ module.exports = { whereToIncludeRedirects: '', // defaults to: "server" }, }, + { + // This plugin must be placed last in your list of plugins to ensure that it can query all the GraphQL data + resolve: `gatsby-plugin-algolia`, + options: { + appId: process.env.ALGOLIA_APP_ID, + apiKey: process.env.ALGOLIA_API_KEY, + indexName: algoliaIndex, + queries: [ + { + query: indexQuery, + transformer: algoliaTransformer, + indexName: algoliaIndex, + }, + ], + chunkSize: 1000, + enablePartialUpdates: false, + skipIndexing: process.env.INDEX_ON_BUILD !== 
'true', + }, + }, ], }; - -if (process.env.INDEX_ON_BUILD && process.env.INDEX_ON_BUILD !== 'false') { - module.exports['plugins'].push({ - // This plugin must be placed last in your list of plugins to ensure that it can query all the GraphQL data - resolve: `gatsby-plugin-algolia`, - options: { - appId: process.env.ALGOLIA_APP_ID, - apiKey: process.env.ALGOLIA_API_KEY, - indexName: algoliaIndex, - queries: [ - { - query: indexQuery, - transformer: ({ data }) => - splitNodeContent( - addBreadcrumbsToNodes(data.allMdx.nodes).map((node) => - transformNodeForAlgolia(node), - ), - ), - indexName: algoliaIndex, - }, - ], - chunkSize: 1000, // default: 1000, - enablePartialUpdates: false, - skipIndexing: !process.env.INDEX_ON_BUILD, // useless on plugin version 0.13.0, just for posterity - }, - }); -} diff --git a/package.json b/package.json index a66e74f78a5..701dd8d676f 100644 --- a/package.json +++ b/package.json @@ -37,7 +37,7 @@ "gatsby": "^2.31.1", "gatsby-cli": "^2.12.93", "gatsby-image": "^2.4.1", - "gatsby-plugin-algolia": "^0.13.0", + "gatsby-plugin-algolia": "^0.16.3", "gatsby-plugin-catch-links": "^2.6.0", "gatsby-plugin-google-fonts": "^1.0.1", "gatsby-plugin-google-tagmanager": "^2.10.0", diff --git a/src/constants/algolia-indexing.js b/src/constants/algolia-indexing.js new file mode 100644 index 00000000000..3087775c7a2 --- /dev/null +++ b/src/constants/algolia-indexing.js @@ -0,0 +1,177 @@ +const utf8Truncate = require('truncate-utf8-bytes'); + +const transformNodeForAlgolia = (node) => { + let newNode = node; + newNode['title'] = node.frontmatter.title; + newNode['path'] = node.fields.path; + newNode['type'] = 'guide'; + if (node.frontmatter.product) { + newNode['product'] = node.frontmatter.product; + } + if (node.frontmatter.platform) { + newNode['platform'] = node.frontmatter.platform; + } + + if (node.fields.docType == 'doc') { + newNode['product'] = node.fields.product; + newNode['version'] = node.fields.version; + newNode['productVersion'] = + 
node.fields.product + ' > ' + node.fields.version; + newNode['type'] = 'doc'; + } + + delete newNode['frontmatter']; + delete newNode['fields']; + return newNode; +}; + +const makePathDictionary = (nodes) => { + let dictionary = {}; + for (let node of nodes) { + dictionary[node.fields.path] = node.frontmatter.title; + } + return dictionary; +}; + +const makeBreadcrumbs = (node, dictionary, advocacy = false) => { + let depth = advocacy ? 3 : 4; + let trail = ''; + const path = node.fields.path; + const pathPieces = path.split('/'); + for (let i = depth; i < pathPieces.length; i++) { + let parentPath = pathPieces.slice(0, i).join('/'); + trail += dictionary[parentPath] + ' / '; + } + return trail; +}; + +const addBreadcrumbsToNodes = (nodes) => { + const pathDictionary = makePathDictionary(nodes); + let newNodes = []; + for (let node of nodes) { + let newNode = node; + const advocacy = !node.fields.product; + newNode['breadcrumb'] = makeBreadcrumbs(node, pathDictionary, advocacy); + newNodes.push(newNode); + } + return newNodes; +}; + +const mdxTreeToSearchNodes = (rootNode) => { + rootNode.depth = 0; + const stack = [rootNode]; + const searchNodes = []; + const initialSearchNode = { text: '', heading: '' }; + + let parseState = { + attribute: 'text', + nextAttribute: null, + transitionDepth: null, + }; + const nextParseStateIfDepth = (depth) => { + if (!parseState.transitionDepth || depth > parseState.transitionDepth) + return; + parseState = { + attribute: parseState.nextAttribute, + nextAttribute: null, + transitionDepth: null, + }; + }; + const setHeadingParseState = (depth) => { + parseState = { + attribute: 'heading', + nextAttribute: 'text', + transitionDepth: depth, + }; + }; + + let searchNode = { ...initialSearchNode }; + let node = null; + while (stack.length > 0) { + node = stack.pop(); + nextParseStateIfDepth(node.depth); + + if (['import', 'export'].includes(node.type)) { + // skip these nodes + continue; + } + + if (node.type === 'heading') { + // 
break on headings + if (searchNode.text.length > 0) { + searchNodes.push(searchNode); + } + searchNode = { ...initialSearchNode }; + setHeadingParseState(node.depth); + } + + if (node.value && !['html', 'jsx'].includes(node.type)) { + searchNode[parseState.attribute] += ` ${node.value}`; + } else { + (node.children || []) + .slice() + .reverse() + .forEach((child) => { + child.depth = node.depth + 1; + stack.push(child); + }); + } + } + if (searchNode.text.length > '') { + searchNodes.push(searchNode); + } + + return searchNodes; +}; + +const trimSpaces = (str) => { + return str.replace(/\s+/g, ' ').trim(); +}; + +const splitNodeContent = (nodes) => { + const result = []; + for (const node of nodes) { + // skip indexing this content for now + if ( + node.path.includes('/postgresql_journey/') || + node.path.includes('/playground/') + ) { + console.log(`skipped indexing ${node.path}`); + continue; + } + + const searchNodes = mdxTreeToSearchNodes(node.mdxAST); + + searchNodes.forEach((searchNode, i) => { + let newNode = { ...node }; + delete newNode['mdxAST']; + + newNode.id = `${newNode.path}-${i + 1}`; + newNode.heading = trimSpaces(searchNode.heading); + newNode.excerpt = utf8Truncate( + trimSpaces(`${searchNode.heading}: ${searchNode.text}`), + 8000, + ); + if (searchNode.heading.length > 0) { + const anchor = newNode.heading + .split(' ') + .join('-') + .toLowerCase() + .replace('/', ''); + newNode.path = `${newNode.path}#${anchor}`; + } + + result.push(newNode); + }); + } + return result; +}; + +const algoliaTransformer = ({ data }) => + splitNodeContent( + addBreadcrumbsToNodes(data.allMdx.nodes).map((node) => + transformNodeForAlgolia(node), + ), + ); + +module.exports = algoliaTransformer; diff --git a/yarn.lock b/yarn.lock index 6486cab0c74..e1ca1926fe6 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6741,10 +6741,10 @@ gatsby-page-utils@^0.9.0: lodash "^4.17.20" micromatch "^4.0.2" -gatsby-plugin-algolia@^0.13.0: - version "0.13.0" - resolved 
"https://registry.yarnpkg.com/gatsby-plugin-algolia/-/gatsby-plugin-algolia-0.13.0.tgz#d73ba83f5a449313356c1897c2d2ecf0d8efa21c" - integrity sha512-hx+J9IfHuuxxENtz/GlBQxbiMPCu71dWI32EKVvas0T57SIZES1YO2Xcck1TEJpDHjU59tO/cLFJ/3EJTUBl6Q== +gatsby-plugin-algolia@^0.16.3: + version "0.16.3" + resolved "https://registry.yarnpkg.com/gatsby-plugin-algolia/-/gatsby-plugin-algolia-0.16.3.tgz#f455cdc30d9b10ee3d417a9052167339109f0793" + integrity sha512-1ErBAsPtDL/uvUpweBCNpzkMFamXnYYUVdhFnpx488PFAl7N26I0Jsk+TcoHCMTTp9GGRahF8w7VBvv7jtKUAQ== dependencies: algoliasearch "^3.24.5" gatsby-cli "^1.1.58" From c04251eb996819f29cfbe48041f33e5da5837b38 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Fri, 5 Mar 2021 15:50:32 -0500 Subject: [PATCH 20/28] Move netlify headers into ternary in the actual config object - the old method seemed to bust the cache Former-commit-id: 886f5acaafb3b3ce94440b47e8ec06591fcf5fe9 --- gatsby-config.js | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/gatsby-config.js b/gatsby-config.js index 6182a8f8811..b25303d7dae 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -126,14 +126,6 @@ const indexQuery = ` `; /********** Gatsby config *********/ -const netlifyHeaders = () => { - if (isProduction) return {}; - - return { - '/*': ['X-Robots-Tag: noindex'], - }; -}; - module.exports = { flags: { PRESERVE_WEBPACK_CACHE: true, @@ -159,7 +151,9 @@ module.exports = { { resolve: 'gatsby-plugin-netlify', options: { - headers: netlifyHeaders(), + headers: { + '/*': isProduction ? 
[] : ['X-Robots-Tag: noindex'], + }, }, }, // 'gatsby-plugin-remove-fingerprints', // speeds up Netlify, see https://github.com/narative/gatsby-plugin-remove-fingerprints From c6f4ef431c6af0c1f0398ce6cb11d43b4470d12a Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Mon, 8 Mar 2021 14:16:39 +0530 Subject: [PATCH 21/28] Minor update Former-commit-id: ff851e38d254627c96dffbe80ba119fe57f3fe0e --- .../01_supported_database_server_versions.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx index 7d4ba9703b0..a0991827cc2 100644 --- a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx +++ b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx @@ -4,7 +4,7 @@ title: "Supported Database Server Versions" -Language Pack installers are version and platform specific; select the Language Pack installer that corresponds to your EDB Postgres Advanced Server or PostgreSQL server version: +Language Pack installers are version and platform specific; select the Language Pack installer that corresponds to your EDB Postgres Advanced Server or PostgreSQL server version that are installed through interactive installer. 
**Linux:** @@ -16,9 +16,9 @@ For detailed information, please see the EDB Postgres Advanced Server Installati **Mac OS:** -| PostgreSQL Version | Language Pack Version | Procedural Language Version | -| -------------------------- | --------------------- | ------------------------------ | -| `9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | +| PostgreSQL Version | Language Pack Version | Procedural Language Version | +| ----------------------------------------------- | --------------------- | ------------------------------ | +| `9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | **Windows 32:** From c432a350a91fdceefdbca1109fd8f1fded43390d Mon Sep 17 00:00:00 2001 From: Robert Stringer Date: Mon, 8 Mar 2021 14:33:23 +0000 Subject: [PATCH 22/28] rs: refactor table Former-commit-id: e5551f50c9edfbafebf9bf45e07e7682af816a48 --- .../01_supported_database_server_versions.mdx | 32 +++++-------------- 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx index a0991827cc2..3fea724fd7c 100644 --- a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx +++ b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx @@ -6,29 +6,13 @@ title: "Supported Database Server Versions" Language Pack installers are version and platform specific; select the Language Pack installer that corresponds to your EDB Postgres Advanced Server or PostgreSQL server version that are installed through interactive installer. 
-**Linux:** - -| EDB Postgres Advanced Server/PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ----------------------------------------------- | --------------------- | ------------------------------ | -| `9.6, 10` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | +|Operating System | Product and Version | Procedural Language Version | +| ----------------- | ----------------------------------------------------- | ------------------------------------ | +| Linux | EDB Postgres Advanced Server `9.6`,`10` | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| Linux | PostgreSQL `9.6`, `10` | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| MacOS | PostgreSQL `9.6`,`10`,`11`,`12`,`13` | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| Windows (32-bit) | PostgreSQL `9.6`,`10` | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| Windows (64-bit) | PostgreSQL `9.6`,`10`,`11`,`12`,`13` | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| Windows (64-bit) | EDB Postgres Advanced Server `9.6`,`10`,`11`,`12`,`13`| Perl `5.26`, Python `3.7`, Tcl `8.6` | For detailed information, please see the EDB Postgres Advanced Server Installation Guide for Linux, available at the [EDB website](/epas/latest/). 
- -**Mac OS:** - -| PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ----------------------------------------------- | --------------------- | ------------------------------ | -| `9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | - -**Windows 32:** - -| PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ----------------------------------------------- | --------------------- | ------------------------------ | -| `9.6, 10` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | - -**Windows 64:** - -| EDB Postgres Advanced Server/PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ------------------------------------------------- | --------------------- | ------------------------------ | -| `PostgreSQL 9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.8 | -| `EDB Postgres Advanced Server 9.6, 10, 11, 12, 13`| 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | From 0d3f60929476582ea44385bb4859950412713c07 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Mon, 8 Mar 2021 15:57:31 -0500 Subject: [PATCH 23/28] Update layout mdxComponents to not break memoization on meta object, but specific keys instead Former-commit-id: 9f45269d1408447a677d6574eceffacfe2f105cf --- src/components/layout.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/components/layout.js b/src/components/layout.js index d4dba5d33b4..c28c3055cd0 100644 --- a/src/components/layout.js +++ b/src/components/layout.js @@ -80,7 +80,7 @@ const Layout = ({ Icon, StubCards, }), - [katacodaPanelData, meta], + [katacodaPanelData, meta.path, meta.isIndexPage], ); return ( From f20de2e04fb5873058865c220ff2590c1057a368 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Mon, 8 Mar 2021 18:27:18 -0500 Subject: [PATCH 24/28] Remove post-build.sh - instead use onPostBuild to clean up _redirects Former-commit-id: ed4e09dd09f31540b7530603ca2f6c04b1f68e6d --- gatsby-node.js | 17 +++++++++++++++++ package.json | 2 +- 
scripts/post-build.sh | 8 -------- src/constants/gatsby-node-utils.js | 18 ++++++++++++++++++ 4 files changed, 36 insertions(+), 9 deletions(-) delete mode 100644 scripts/post-build.sh diff --git a/gatsby-node.js b/gatsby-node.js index 2e1ff8415df..57624d88202 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -23,6 +23,8 @@ const { findPrevNextNavNodes, configureRedirects, configureLegacyRedirects, + readFile, + writeFile, } = require('./src/constants/gatsby-node-utils.js'); const isBuild = process.env.NODE_ENV === 'production'; @@ -412,3 +414,18 @@ exports.onPreBootstrap = () => { `); }; + +exports.onPostBuild = async ({ reporter, pathPrefix }) => { + const originalRedirects = await readFile('public/_redirects'); + + // filter out legacyRedirects that are loaded via nginx, not netlify + let filteredRedirects = originalRedirects + .split('\n') + .filter((line) => !line.startsWith(`${pathPrefix}/edb-docs/`)) + .join('\n'); + + await writeFile( + 'public/_redirects', + `${filteredRedirects}\n\n# Netlify pathPrefix path rewrite\n${pathPrefix}/* /:splat 200`, + ); +}; diff --git a/package.json b/package.json index 701dd8d676f..5bfb9fb440a 100644 --- a/package.json +++ b/package.json @@ -12,7 +12,7 @@ "develop": "gatsby develop", "config-sources": "python3 scripts/source/config_sources.py", "format": "prettier --write src/**/*.js gatsby-*.js", - "build": "gatsby build --prefix-paths && bash scripts/post-build.sh", + "build": "gatsby build --prefix-paths", "serve-build": "gatsby serve --prefix-paths", "update-icons": "git submodule update --init --remote && node scripts/createIconTypes.js && node scripts/createIconNames.js", "build-pdf": "python3 scripts/pdf/generate_pdf.py", diff --git a/scripts/post-build.sh b/scripts/post-build.sh deleted file mode 100644 index d903e459964..00000000000 --- a/scripts/post-build.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -echo "post-build script starting" -if test -f "public/_redirects"; then - echo "writing path prefix to 
public/_redirects" - printf "\n\n/docs/* /:splat 200" >> public/_redirects -fi -echo "post-build script complete" diff --git a/src/constants/gatsby-node-utils.js b/src/constants/gatsby-node-utils.js index 17b1386c69a..ef6d0950181 100644 --- a/src/constants/gatsby-node-utils.js +++ b/src/constants/gatsby-node-utils.js @@ -1,3 +1,5 @@ +const fs = require('fs'); + const sortVersionArray = (versions) => { return versions .map((version) => version.replace(/\d+/g, (n) => +n + 100000)) @@ -308,6 +310,20 @@ const configureLegacyRedirects = ({ }); }; +const readFile = (filePath) => + new Promise(function (resolve, reject) { + fs.readFile(filePath, 'utf8', function (err, data) { + err ? reject(err) : resolve(data); + }); + }); + +const writeFile = (filePath, data) => + new Promise(function (resolve, reject) { + fs.writeFile(filePath, data, function (err) { + err ? reject(err) : resolve(); + }); + }); + module.exports = { sortVersionArray, replacePathVersion, @@ -325,4 +341,6 @@ module.exports = { findPrevNextNavNodes, configureRedirects, configureLegacyRedirects, + readFile, + writeFile, }; From ed2233adc54955808f5783d091a0635449b59b0b Mon Sep 17 00:00:00 2001 From: Manjusha Vaidya Date: Tue, 9 Mar 2021 15:13:11 +0530 Subject: [PATCH 25/28] table_reformatting Former-commit-id: 39b35a1b602f3f11896cbabb21756a3fbcd306ec --- .../01_supported_database_server_versions.mdx | 32 +++++-------------- 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx index a0991827cc2..74292e82205 100644 --- a/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx +++ b/product_docs/docs/epas/13/language_pack/01_supported_database_server_versions.mdx @@ -6,29 +6,13 @@ title: "Supported Database Server Versions" Language Pack installers are version and platform specific; select the Language Pack 
installer that corresponds to your EDB Postgres Advanced Server or PostgreSQL server version that are installed through interactive installer. -**Linux:** - -| EDB Postgres Advanced Server/PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ----------------------------------------------- | --------------------- | ------------------------------ | -| `9.6, 10` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | +| Operating System | Product and Version | Language Pack Version | Procedural Language Version | +| ----------------- | ----------------------------------------------------- | ----------------------| ------------------------------------ | +| Linux | EDB Postgres Advanced Server `9.6`,`10` | 1.0 | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| Linux | PostgreSQL `9.6`, `10` | 1.0 | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| MacOS | PostgreSQL `9.6`,`10`,`11`,`12`,`13` | 1.0 | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| Windows (32-bit) | PostgreSQL `9.6`,`10` | 1.0 | Perl `5.26`, Python `3.7`, Tcl `8.6` | +| Windows (64-bit) | PostgreSQL `9.6`,`10`,`11`,`12`,`13` | 1.0 | Perl `5.26`, Python `3.7`, Tcl `8.8` | +| Windows (64-bit) | EDB Postgres Advanced Server `9.6`,`10`,`11`,`12`,`13`| 1.0 | Perl `5.26`, Python `3.7`, Tcl `8.6` | For detailed information, please see the EDB Postgres Advanced Server Installation Guide for Linux, available at the [EDB website](/epas/latest/). 
- -**Mac OS:** - -| PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ----------------------------------------------- | --------------------- | ------------------------------ | -| `9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | - -**Windows 32:** - -| PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ----------------------------------------------- | --------------------- | ------------------------------ | -| `9.6, 10` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | - -**Windows 64:** - -| EDB Postgres Advanced Server/PostgreSQL Version | Language Pack Version | Procedural Language Version | -| ------------------------------------------------- | --------------------- | ------------------------------ | -| `PostgreSQL 9.6, 10, 11, 12, 13` | 1.0 | Perl 5.26, Python 3.7, Tcl 8.8 | -| `EDB Postgres Advanced Server 9.6, 10, 11, 12, 13`| 1.0 | Perl 5.26, Python 3.7, Tcl 8.6 | From 75baccb6b1ba7429be21680ca2a96ea0a5dbac70 Mon Sep 17 00:00:00 2001 From: josh-heyer Date: Tue, 9 Mar 2021 11:37:55 +0000 Subject: [PATCH 26/28] New PDFs generated by Github Actions Former-commit-id: ca2279191e214deda45cf503fd8cba9c97fc82f1 From 5b38ec3eef0cab8fbe375c766ca5f8a024cf624f Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Tue, 9 Mar 2021 11:55:05 -0500 Subject: [PATCH 27/28] Switch to EDB's algolia account Former-commit-id: 32f2b4b2d42fc120cd7221af1a13c8c494791668 --- .github/workflows/deploy-develop.yml | 2 +- .github/workflows/deploy-main.yml | 2 +- gatsby-config.js | 2 +- src/components/search/index.js | 4 ++-- src/pages/404.js | 4 ++-- src/pages/search.js | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/deploy-develop.yml b/.github/workflows/deploy-develop.yml index 86afae54342..08139803103 100644 --- a/.github/workflows/deploy-develop.yml +++ b/.github/workflows/deploy-develop.yml @@ -47,7 +47,7 @@ jobs: NODE_OPTIONS: --max-old-space-size=4096 ALGOLIA_API_KEY: ${{ 
secrets.ALGOLIA_API_KEY }} ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} - ALGOLIA_INDEX_NAME: edb-staging + ALGOLIA_INDEX_NAME: edb-docs-staging INDEX_ON_BUILD: true - name: Netlify deploy diff --git a/.github/workflows/deploy-main.yml b/.github/workflows/deploy-main.yml index 99b25dbf82b..59b7cf8b129 100644 --- a/.github/workflows/deploy-main.yml +++ b/.github/workflows/deploy-main.yml @@ -47,7 +47,7 @@ jobs: NODE_OPTIONS: --max-old-space-size=4096 ALGOLIA_API_KEY: ${{ secrets.ALGOLIA_API_KEY }} ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} - ALGOLIA_INDEX_NAME: edb + ALGOLIA_INDEX_NAME: edb-docs GTM_ID: GTM-5W8M67 INDEX_ON_BUILD: true diff --git a/gatsby-config.js b/gatsby-config.js index b25303d7dae..2b8dce18413 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -11,7 +11,7 @@ const ANSI_STOP = '\033[0m'; const isBuild = process.env.NODE_ENV === 'production'; const isProduction = process.env.APP_ENV === 'production'; -const algoliaIndex = process.env.ALGOLIA_INDEX_NAME || 'edb-staging'; +const algoliaIndex = process.env.ALGOLIA_INDEX_NAME || 'edb-docs-staging'; /******** Sourcing *********/ const sourceFilename = isBuild ? 
'build-sources.json' : 'dev-sources.json'; diff --git a/src/components/search/index.js b/src/components/search/index.js index 6ae6aa089a0..adb5f5a9c79 100644 --- a/src/components/search/index.js +++ b/src/components/search/index.js @@ -16,8 +16,8 @@ import { SlashIndicator, ClearButton, SearchPane } from './formComps'; import useSiteMetadata from '../../hooks/use-sitemetadata'; const searchClient = algoliasearch( - 'NQVJGNW933', - '3089ae4f190ea7c91590336eeba5f0ea', + 'HXNAF5X3I8', + 'fb05499144f0399f5985485b624a0290', ); const useClickOutside = (ref, handler, events) => { diff --git a/src/pages/404.js b/src/pages/404.js index 52cefee22fa..852470ada8c 100644 --- a/src/pages/404.js +++ b/src/pages/404.js @@ -14,8 +14,8 @@ import useSiteMetadata from '../hooks/use-sitemetadata'; import usePathPrefix from '../hooks/use-path-prefix'; const searchClient = algoliasearch( - 'NQVJGNW933', - '3089ae4f190ea7c91590336eeba5f0ea', + 'HXNAF5X3I8', + 'fb05499144f0399f5985485b624a0290', ); const buildQuery = (pathname, pathPrefix) => { diff --git a/src/pages/search.js b/src/pages/search.js index eb8661f5971..57264ff7c03 100644 --- a/src/pages/search.js +++ b/src/pages/search.js @@ -13,8 +13,8 @@ import { import useSiteMetadata from '../hooks/use-sitemetadata'; const searchClient = algoliasearch( - 'NQVJGNW933', - '3089ae4f190ea7c91590336eeba5f0ea', + 'HXNAF5X3I8', + 'fb05499144f0399f5985485b624a0290', ); const Search = (data) => { From be48fcc5876950086205d8592d79f929fb322ff3 Mon Sep 17 00:00:00 2001 From: Evan Barger Date: Tue, 9 Mar 2021 12:40:34 -0500 Subject: [PATCH 28/28] Add warning if no filtering happens Former-commit-id: 335ba169dfe39e8ee73d5da194febca2a96a9b50 --- gatsby-node.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gatsby-node.js b/gatsby-node.js index 57624d88202..d77f483bffd 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -424,6 +424,10 @@ exports.onPostBuild = async ({ reporter, pathPrefix }) => { .filter((line) => 
!line.startsWith(`${pathPrefix}/edb-docs/`)) .join('\n'); + if (filteredRedirects.length === originalRedirects.length) { + reporter.warn('no redirects were filtered out, did something change?'); + } + await writeFile( 'public/_redirects', `${filteredRedirects}\n\n# Netlify pathPrefix path rewrite\n${pathPrefix}/* /:splat 200`,