diff --git a/aux/config.yml b/aux/config.yml
index d5f115a..2c04aa2 100644
--- a/aux/config.yml
+++ b/aux/config.yml
@@ -122,17 +122,6 @@ cluster: !ruby/object:SapHA::Configuration::Cluster
   - :@keys
   - :@append_hosts
   - :@host_passwords
-cluster_finalizer: !ruby/object:SapHA::Configuration::ClusterFinalizer
-  global_config: *5
-  screen_name: Cluster Configuration Finalizer
-  exception_type: &6 !ruby/class 'SapHA::Exceptions::BaseConfigException'
-  yaml_exclude:
-  - :@nlog
-  instance_variables:
-  - :@global_config
-  - :@screen_name
-  - :@exception_type
-  - :@yaml_exclude
 fencing: !ruby/object:SapHA::Configuration::Fencing
   global_config: *5
   screen_name: Fencing Mechanism
diff --git a/aux/config_prd.yml b/aux/config_prd.yml
index 5957479..2e15fed 100644
--- a/aux/config_prd.yml
+++ b/aux/config_prd.yml
@@ -121,17 +121,6 @@ cluster: !ruby/object:SapHA::Configuration::Cluster
   - :@enable_csync2
   - :@keys
   - :@append_hosts
-cluster_finalizer: !ruby/object:SapHA::Configuration::ClusterFinalizer
-  global_config: *5
-  screen_name: Cluster Configuration Finalizer
-  exception_type: &6 !ruby/class 'SapHA::Exceptions::BaseConfigException'
-  yaml_exclude:
-  - :@nlog
-  instance_variables:
-  - :@global_config
-  - :@screen_name
-  - :@exception_type
-  - :@yaml_exclude
 fencing: !ruby/object:SapHA::Configuration::Fencing
   global_config: *5
   screen_name: Fencing Mechanism
diff --git a/aux/config_prd_sps03.yml b/aux/config_prd_sps03.yml
index eb54705..958588b 100644
--- a/aux/config_prd_sps03.yml
+++ b/aux/config_prd_sps03.yml
@@ -122,17 +122,6 @@ cluster: !ruby/object:SapHA::Configuration::Cluster
   - :@enable_csync2
   - :@keys
   - :@append_hosts
-cluster_finalizer: !ruby/object:SapHA::Configuration::ClusterFinalizer
-  global_config: *5
-  screen_name: Cluster Configuration Finalizer
-  exception_type: &6 !ruby/class 'SapHA::Exceptions::BaseConfigException'
-  yaml_exclude:
-  - :@nlog
-  instance_variables:
-  - :@global_config
-  - :@screen_name
-  - :@exception_type
-  - :@yaml_exclude
 fencing: !ruby/object:SapHA::Configuration::Fencing
   global_config: *5
   screen_name: Fencing Mechanism
diff --git a/aux/is_hana_running.sh b/aux/is_hana_running.sh
new file mode 100644
index 0000000..75f7b4d
--- /dev/null
+++ b/aux/is_hana_running.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Log once per second whether the HANA indexserver is running.
+
+while true
+do
+  if [ "$( /usr/sap/hostctrl/exe/sapcontrol -nr 00 -function GetProcessList | grep hdbindexserver )" ]; then
+    echo -n "RUN " >> /var/log/hana-state
+    date >> /var/log/hana-state
+  else
+    echo -n "NOT " >> /var/log/hana-state
+    date >> /var/log/hana-state
+  fi
+  sleep 1
+done
+
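Reviewer note: `aux/is_hana_running.sh` is a small debugging aid — it appends a `RUN`/`NOT` marker plus a timestamp to `/var/log/hana-state` once per second. For reference, a rough Ruby equivalent of the same probe (instance number `00` and the log path are taken from the script; this is a sketch, not part of the patch):

```ruby
require "open3"

# True when sapcontrol lists an hdbindexserver process for the instance.
def hana_indexserver_running?(instance_nr = "00")
  out, _status = Open3.capture2e("/usr/sap/hostctrl/exe/sapcontrol",
    "-nr", instance_nr, "-function", "GetProcessList")
  out.include?("hdbindexserver")
end

# Append one sample to the state log, mirroring the shell loop's body.
File.open("/var/log/hana-state", "a") do |log|
  log.puts "#{hana_indexserver_running? ? 'RUN' : 'NOT'} #{Time.now}"
end
```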
diff --git a/package/yast2-sap-ha.changes b/package/yast2-sap-ha.changes
index b8b919b..e243c31 100644
--- a/package/yast2-sap-ha.changes
+++ b/package/yast2-sap-ha.changes
@@ -1,3 +1,27 @@
+-------------------------------------------------------------------
+Wed Nov 29 07:52:36 UTC 2023 - Peter Varkoly
+
+- yast2-sap-ha setup workflow is bad (bsc#1217596)
+  Reworking the workflow:
+  1. Setting up SAP HANA System Replication
+  2. Setting up SAP HANA HA/DR providers
+  3. Configuring the base cluster on all nodes
+  4. Configuring cluster properties and resources with the new function HANA.finalize
+  The whole class ClusterFinalizer was removed.
+- 6.0.0
+
+-------------------------------------------------------------------
+Thu Nov 9 08:31:53 UTC 2023 - Peter Varkoly
+
+- yast2-sap-ha wizard terminates abruptly when save configuration option
+  is selected post configuration (bsc#1214603)
+- yast2-sap-ha does not set global_allocation_limit for non productive database
+  (bsc#1216651)
+- Ensure that values read from a saved configuration are not
+  overridden during initialization of the modules
+- Check if the required HANA systems are installed on the nodes.
+- 5.0.1
+
 -------------------------------------------------------------------
 Mon Aug 7 05:13:47 UTC 2023 - Peter Varkoly
 
diff --git a/package/yast2-sap-ha.spec b/package/yast2-sap-ha.spec
index 314e75b..f90bab5 100644
--- a/package/yast2-sap-ha.spec
+++ b/package/yast2-sap-ha.spec
@@ -17,7 +17,7 @@
 
 Name:           yast2-sap-ha
-Version:        5.0.0
+Version:        6.0.0
 Release:        0
 BuildArch:      noarch
 Source0:        %{name}-%{version}.tar.bz2
@@ -32,7 +32,7 @@
 Requires:       hawk2
 Requires:       pacemaker
 Requires:       rubygem(%{rb_default_ruby_abi}:xmlrpc)
 Requires:       yast2
-Requires:       yast2-cluster >= 4.3.8
+Requires:       yast2-cluster >= 4.4.4
 Requires:       yast2-ruby-bindings
 Requires:       yast2-ntp-client
 # for opening URLs
@@ -41,7 +41,9 @@
 Requires:       xdg-utils
 Requires:       expect
 Requires:       firewalld
 Requires:       openssh
+%ifarch x86_64 ppc64le
 Requires:       HANA-Firewall >= 2.0.3
+%endif
 Requires:       util-linux
 Requires:       SAPHanaSR
 Requires:       kmod
diff --git a/src/clients/sap_ha.rb b/src/clients/sap_ha.rb
index a0efbd1..834d925 100644
--- a/src/clients/sap_ha.rb
+++ b/src/clients/sap_ha.rb
@@ -312,7 +312,9 @@ def scenario_selection
     log.debug "--- called #{self.class}.#{__callee__}:: ret is #{selection.class} ---"
     if selection.is_a?(SapHA::HAConfiguration)
       @config = selection
+      log.debug "-- @config #{@config.to_yaml}"
       @config.refresh_all_proposals
+      log.debug "-- @config after refresh #{@config.to_yaml}"
       return :next
     end
     selection
diff --git a/src/data/sap_ha/GLOBAL_INI_NON_PROD.erb b/src/data/sap_ha/GLOBAL_INI_NON_PROD.erb
new file mode 100644
index 0000000..333df6e
--- /dev/null
+++ b/src/data/sap_ha/GLOBAL_INI_NON_PROD.erb
@@ -0,0 +1,2 @@
+[memorymanager]
+global_allocation_limit = <%= @production_constraints[:global_alloc_limit_non] -%>
diff --git a/src/data/sap_ha/GLOBAL_INI_SUS_COSTOPT.erb b/src/data/sap_ha/GLOBAL_INI_SUS_COSTOPT.erb
index a9e6414..b35ce3d 100644
--- a/src/data/sap_ha/GLOBAL_INI_SUS_COSTOPT.erb
+++ b/src/data/sap_ha/GLOBAL_INI_SUS_COSTOPT.erb
@@ -1,5 +1,5 @@
 [memorymanager]
-global_allocation_limit = <%= @production_constraints[:global_alloc_limit] -%>
+global_allocation_limit = <%= @production_constraints[:global_alloc_limit_prod] -%>
 
 [system_replication]
 preload_column_tables = <%= @production_constraints[:preload_column_tables] -%>
diff --git a/src/data/sap_ha/scenarios.yaml b/src/data/sap_ha/scenarios.yaml
index 253b802..5c13e69 100644
--- a/src/data/sap_ha/scenarios.yaml
+++ b/src/data/sap_ha/scenarios.yaml
@@ -12,8 +12,8 @@
       - ntp
       - watchdog
       - fencing
-      - cluster
       - hana
+      - cluster
     screen_sequence: &id002
       - prerequisites
       - communication_layer
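Reviewer note: the single `global_alloc_limit` key is split into `global_alloc_limit_prod` (rendered by the cost-optimized `GLOBAL_INI_SUS_COSTOPT.erb`) and `global_alloc_limit_non` (rendered by the new `GLOBAL_INI_NON_PROD.erb`). A minimal sketch of how such a template consumes `@production_constraints` — plain ERB here, whereas the module goes through `SapHA::Helpers.render_template`, and the limit values are hypothetical:

```ruby
require "erb"

@production_constraints = {
  global_alloc_limit_prod: "49152", # MB, hypothetical
  global_alloc_limit_non:  "16384", # MB, hypothetical
  preload_column_tables:   "false"
}

template = <<~ERB
  [memorymanager]
  global_allocation_limit = <%= @production_constraints[:global_alloc_limit_non] -%>
ERB

# trim_mode "-" honors the "-%>" tags used by the project's templates
puts ERB.new(template, trim_mode: "-").result(binding)
# => [memorymanager]
#    global_allocation_limit = 16384
```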
diff --git a/src/data/sap_ha/tmpl_cluster_config.erb b/src/data/sap_ha/tmpl_cluster_config.erb
index b3ac6d3..cd380d6 100644
--- a/src/data/sap_ha/tmpl_cluster_config.erb
+++ b/src/data/sap_ha/tmpl_cluster_config.erb
@@ -40,7 +40,7 @@ primitive rsc_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> ocf:suse:SAPHana \
     meta priority="100"
 
 ms msl_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> rsc_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> \
-    meta clone-max="2" clone-node-max="1" interleave="true"
+    meta clone-max="2" clone-node-max="1" interleave="true" maintenance="true"
 
 primitive rsc_ip_<%= @system_id -%>_HDB<%= @instance -%> ocf:heartbeat:IPaddr2 \
     op monitor interval="10" timeout="20" \
diff --git a/src/lib/sap_ha/configuration.rb b/src/lib/sap_ha/configuration.rb
index 7eca746..7c3ca97 100644
--- a/src/lib/sap_ha/configuration.rb
+++ b/src/lib/sap_ha/configuration.rb
@@ -26,7 +26,6 @@
 require "sap_ha/helpers"
 require "sap_ha/node_logger"
 require "sap_ha/configuration/cluster"
-require "sap_ha/configuration/cluster_finalizer"
 require "sap_ha/configuration/fencing"
 require "sap_ha/configuration/watchdog"
 require "sap_ha/configuration/hana"
@@ -53,7 +52,6 @@ class HAConfiguration
     :watchdog,
     :hana,
     :ntp,
-    :cluster_finalizer,
     :imported,
     :unattended,
     :completed,
@@ -80,7 +78,6 @@ def initialize(role = :master)
     @scenario_summary = nil
     @yaml_configuration = load_scenarios
     @cluster = Configuration::Cluster.new(self)
-    @cluster_finalizer = Configuration::ClusterFinalizer.new(self)
     @fencing = Configuration::Fencing.new(self)
     @watchdog = Configuration::Watchdog.new(self)
     @hana = Configuration::HANA.new(self)
@@ -94,7 +91,7 @@ def initialize(role = :master)
   # loading an old configuration to detect new hardware.
   def refresh_all_proposals
     @watchdog.refresh_proposals
-    @fencing.read_system
+    @fencing.refresh_proposals
   end
 
   # Product ID setter. Raises an ScenarioNotFoundException if the ID was not found
@@ -233,6 +230,7 @@ def collect_log
 
   def write_config
     log.debug "--- called #{self.class}.#{__callee__} ---"
+    @timestamp = Time.now
     SapHA::Helpers.write_var_file("configuration.yml", dump(false, true),
       timestamp: @timestamp)
   end
diff --git a/src/lib/sap_ha/configuration/cluster.rb b/src/lib/sap_ha/configuration/cluster.rb
index 742e994..2fd945b 100644
--- a/src/lib/sap_ha/configuration/cluster.rb
+++ b/src/lib/sap_ha/configuration/cluster.rb
@@ -247,6 +247,13 @@ def other_nodes
     ips
   end
 
+  # return all IPs of the first ring
+  def all_nodes
+    ips = @nodes.map { |_, n| n[:ip_ring1] }
+    return [] if ips.any?(&:empty?)
+    ips
+  end
+
   def set_host_password(ip, password)
     node = @nodes.values.find { |v| v[:ip_ring1] == ip }
     if node.nil?
@@ -280,12 +287,12 @@ def other_nodes_ext
 
   def get_primary_on_primary
     SapHA::System::Network.ip_addresses.each do |my_ip|
-    @nodes.each do |_, node|
-      if node[:ip_ring1] == my_ip
-        return node[:host_name]
-      end
-    end
-    end
+      @nodes.each do |_, node|
+        if node[:ip_ring1] == my_ip
+          return node[:host_name]
+        end
+      end
+    end
     return nil
   end
@@ -374,10 +381,7 @@ def apply(role)
     @nlog.log_status(status,
       "Exported configuration for yast2-cluster",
       "Could not export configuration for yast2-cluster")
     flag &= status
-    #Handle firewall
-    SapHA::System::Local.config_firewall(@fw_config, role)
     flag &= SapHA::System::Local.start_cluster_services
-    flag &= SapHA::System::Local.cluster_maintenance(:on) if role == :master
     flag &= SapHA::System::Local.add_stonith_resource if role == :master
     flag
   end
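Reviewer note: `all_nodes` complements the existing `other_nodes` — it returns the first-ring IPs of every configured node, including the local one, and feeds the new `hana_is_installed` semantic check for the production SID. A toy illustration with made-up data:

```ruby
# Hypothetical two-node configuration, shaped like the @nodes hash
# maintained by SapHA::Configuration::Cluster.
nodes = {
  node1: { host_name: "hana01", ip_ring1: "192.168.100.1" },
  node2: { host_name: "hana02", ip_ring1: "192.168.100.2" }
}

ips = nodes.map { |_, n| n[:ip_ring1] }  # => ["192.168.100.1", "192.168.100.2"]
ips = [] if ips.any?(&:empty?)           # an incomplete configuration yields []
p ips
```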
diff --git a/src/lib/sap_ha/configuration/cluster_finalizer.rb b/src/lib/sap_ha/configuration/cluster_finalizer.rb
deleted file mode 100644
index d75b348..0000000
--- a/src/lib/sap_ha/configuration/cluster_finalizer.rb
+++ /dev/null
@@ -1,59 +0,0 @@
-# encoding: utf-8
-
-# ------------------------------------------------------------------------------
-# Copyright (c) 2016 SUSE Linux GmbH, Nuernberg, Germany.
-#
-# This program is free software; you can redistribute it and/or modify it under
-# the terms of version 2 of the GNU General Public License as published by the
-# Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, contact SUSE Linux GmbH.
-#
-# ------------------------------------------------------------------------------
-#
-# Summary: SUSE High Availability Setup for SAP Products: Cluster members configuration
-# Authors: Ilya Manyugin
-
-require "yast"
-require "erb"
-require "socket"
-require_relative "base_config"
-require "sap_ha/system/local"
-require "sap_ha/exceptions"
-
-Yast.import "UI"
-
-module SapHA
-  module Configuration
-    # Cluster members configuration finalizer
-    class ClusterFinalizer < BaseConfig
-      def initialize(global_config)
-        super
-        log.debug "--- #{self.class}.#{__callee__} ---"
-        @screen_name = "Cluster Configuration Finalizer"
-      end
-
-      def configured?
-        true
-      end
-
-      def description
-        ""
-      end
-
-      def apply(role)
-        if role == :master
-          SapHA::System::Local.cluster_maintenance(:off)
-          @global_config.completed = true
-        else
-          true
-        end
-      end
-    end
-  end
-end
diff --git a/src/lib/sap_ha/configuration/fencing.rb b/src/lib/sap_ha/configuration/fencing.rb
index 9b6cd2c..0d28812 100644
--- a/src/lib/sap_ha/configuration/fencing.rb
+++ b/src/lib/sap_ha/configuration/fencing.rb
@@ -28,7 +28,7 @@ module Configuration
   # Fencing configuration
   class Fencing < BaseConfig
     attr_reader :proposals, :sysconfig
-    attr_accessor :sbd_options, :sbd_delayed_start
+    attr_accessor :sbd_options, :sbd_delayed_start, :devices
 
     include Yast::UIShortcuts
     include Yast::Logger
@@ -83,7 +83,9 @@ def list_items(key)
     end
 
     def table_items
-      @devices.each_with_index.map { |e, i| Item(Id(i), (i + 1).to_s, e) }
+      de_items = @devices.each_with_index.map { |e, i| Item(Id(i), (i + 1).to_s, e) }
+      log.debug "table_items #{@devices}"
+      return de_items
     end
 
     def popup_validator(check, dev_path)
@@ -144,8 +146,11 @@ def apply(role)
       flag
     end
 
-    private
+    def refresh_proposals
+      @proposals = SapHA::System::Local.block_devices
+    end
 
+    private
     def handle_sysconfig
       handle = ->(sett, default) { (sett.nil? || sett.empty?) ? default : sett }
       @devices = handle.call(@sysconfig[:device], "").split(";")
@@ -154,9 +159,6 @@ def handle_sysconfig
       true
     end
 
-    def refresh_proposals
-      @proposals = SapHA::System::Local.block_devices
-    end
   end
 end
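Reviewer note: `refresh_proposals` had to move above `private` because `HAConfiguration#refresh_all_proposals` now calls it on the `@fencing` object. In Ruby, a private method cannot be invoked with an explicit receiver, so the old placement would raise. A minimal sketch of the effect (the stand-in device list is hypothetical):

```ruby
class Fencing
  def refresh_proposals
    @proposals = ["/dev/sdb"]  # stand-in for SapHA::System::Local.block_devices
  end

  private

  def handle_sysconfig
    # internal helper, intentionally private
  end
end

fencing = Fencing.new
fencing.refresh_proposals   # works: public method with an explicit receiver
# fencing.handle_sysconfig  # would raise NoMethodError: private method called
```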
diff --git a/src/lib/sap_ha/configuration/hana.rb b/src/lib/sap_ha/configuration/hana.rb
index cb6266a..547964b 100644
--- a/src/lib/sap_ha/configuration/hana.rb
+++ b/src/lib/sap_ha/configuration/hana.rb
@@ -94,8 +94,10 @@ def initialize(global_config)
     def additional_instance=(value)
       @additional_instance = value
       return unless value
+      @prefer_takeover = false
       @production_constraints = {
-        global_alloc_limit: "0",
+        global_alloc_limit_prod: "0",
+        global_alloc_limit_non: "0",
         preload_column_tables: "false"
       }
     end
@@ -110,6 +112,7 @@ def configured?
 
     def validate(verbosity = :verbose)
       SemanticChecks.instance.check(verbosity) do |check|
+        check.hana_is_installed(@system_id, @global_config.cluster.all_nodes)
         check.ipv4(@virtual_ip, "Virtual IP")
         check.nonneg_integer(@virtual_ip_mask, "Virtual IP mask")
         check.integer_in_range(@virtual_ip_mask, 1, 32, "CIDR mask has to be between 1 and 32.",
@@ -136,6 +139,7 @@ def validate(verbosity = :verbose)
           "There is no such HANA user store key detected.", "Secure store key")
       end
       if @additional_instance
+        check.hana_is_installed(@np_system_id, @global_config.cluster.other_nodes)
         check.sap_instance_number(@np_instance, nil, "Non-Production Instance Number")
         check.sap_sid(@np_system_id, nil, "Non-Production System ID")
         check.not_equal(@instance, @np_instance, "SAP HANA instance numbers should not collide",
@@ -194,7 +198,8 @@ def hana_backup_validator(check, hash)
     def production_constraints_validation(check, hash)
       check.element_in_set(hash[:preload_column_tables], ["true", "false"],
         "The field must contain a boolean value: 'true' or 'false'", "Preload column tables")
-      check.nonneg_integer(hash[:global_alloc_limit], "Global allocation limit")
+      check.not_equal(hash[:global_alloc_limit_prod], 0.to_s, "Global allocation limit of the production system must be adapted.")
+      check.not_equal(hash[:global_alloc_limit_non], 0.to_s, "Global allocation limit of the non-production system must be adapted.")
     end
 
     # Validator for the non-production instance constraints popup
@@ -208,8 +213,8 @@ def non_production_constraints_validation(check, hash)
 
     def apply(role)
       return false unless configured?
-      @nlog.info("Appying HANA Configuration")
-      config_firewall(role)
+      @nlog.info("Applying HANA Configuration")
+      configure_firewall(role)
       if role == :master
         if @perform_backup
           SapHA::System::Hana.make_backup(@system_id, @backup_user, @backup_file, @instance)
         end
         secondary_host_name = @global_config.cluster.other_nodes_ext.first[:hostname]
         secondary_password = @global_config.cluster.host_passwords[secondary_host_name]
         SapHA::System::Hana.copy_ssfs_keys(@system_id, secondary_host_name, secondary_password)
         SapHA::System::Hana.enable_primary(@system_id, @site_name_1)
-        configure_crm
       else # secondary node
         SapHA::System::Hana.hdb_stop(@system_id)
         primary_host_name = @global_config.cluster.other_nodes_ext.first[:hostname]
@@ -232,20 +236,15 @@
       true
     end
 
-    def cleanup_hana_resources
-      # @FIXME: Workaround for Azure-specific issue that needs investigation
-      # https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability
-      if @global_config.platform == "azure"
-        rsc = "rsc_SAPHana_#{@system_id}_HDB#{@instance}"
-        cleanup_status = exec_status("crm", "resource", "cleanup", rsc)
-        @nlog.log_status(cleanup_status.exitstatus == 0,
-          "Performed resource cleanup for #{rsc}",
-          "Could not clean up #{rsc}")
-      end
+    def finalize
+      configure_crm
+      wait_idle(@global_config.cluster.get_primary_on_primary)
+      activating_msr
     end
 
+    private
+
     def configure_crm
-      # TODO: move this to SapHA::System::Local.configure_crm
       primary_host_name = @global_config.cluster.get_primary_on_primary
       secondary_host_name = @global_config.cluster.other_nodes_ext.first[:hostname]
       crm_conf = Helpers.render_template("tmpl_cluster_config.erb", binding)
@@ -256,10 +255,55 @@
         "Could not configure HANA cluster resources", out)
     end
 
-    def config_firewall(role)
+    # Wait until the node is in state S_IDLE, but at most 60 seconds
+    def wait_idle(node)
+      counter = 0
+      while true
+        out, status = exec_outerr_status("crmadmin", "--quiet", "--status", node)
+        break if out == "S_IDLE"
log.info("wait_idle status of #{node} is #{out}") + counter += 1 + break if counter > 10 + sleep 6 + end + end + + def activating_msr + msr = "msl_SAPHana_#{@system_id}_HDB#{@instance}" + out, status = exec_outerr_status("crm", "resource", "refresh", msr) + @nlog.log_status(status.exitstatus == 0, + "#{msr} status refresh OK", + "Could not refresh status of #{msr}: #{out}") + out, status = exec_outerr_status("crm", "resource", "maintenance", msr, "off") + @nlog.log_status(status.exitstatus == 0, + "#{msr} maintenance turned off.", + "Could turn off maintenance on #{msr}: #{out}") + end + + def cleanup_hana_resources + # @FIXME: Workaround for Azure-specific issue that needs investigation + # https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability + if @global_config.platform == "azure" + rsc = "rsc_SAPHana_#{@system_id}_HDB#{@instance}" + cleanup_status = exec_status("crm", "resource", "cleanup", rsc) + @nlog.log_status(cleanup_status.exitstatus == 0, + "Performed resource cleanup for #{rsc}", + "Could not clean up #{rsc}") + end + end + + # Adapt the firewall depending on the @global_config.cluster.fw_config + # Even if the firewall is already configured the TCP port 8080 will be opened for internal RPC communication during setup + # If the firewall should be stoped during cofiguration no other action is necessary + # If the firewall should be configured in the first step the HANA-Services will be generated by hana-firewall. + # After them the generated services and the service cluster will be added to the default zone. + def configure_firewall(role) case @global_config.cluster.fw_config when "done" @nlog.info("Firewall is already configured") + if role != :master + _s = exec_status("/usr/bin/firewall-cmd", "--add-port", "8080/tcp") + end when "off" @nlog.info("Firewall will be turned off") SapHA::System::Local.systemd_unit(:stop, :service, "firewalld") @@ -267,13 +311,15 @@ def config_firewall(role) @nlog.info("Firewall will be configured for HANA services.") instances = Yast::SCR.Read(Yast::Path.new(".sysconfig.hana-firewall.HANA_INSTANCE_NUMBERS")).split instances << @instance - Yast::SCR.Write(Yast::Path.new(".sysconfig.hana-firewall.HANA_INSTANCE_NUMBERS"), instances) + Yast::SCR.Write(Yast::Path.new(".sysconfig.hana-firewall.HANA_INSTANCE_NUMBERS"), instances.join(" ")) Yast::SCR.Write(Yast::Path.new(".sysconfig.hana-firewall"), nil) _s = exec_status("/usr/sbin/hana-firewall", "generate-firewalld-services") _s = exec_status("/usr/bin/firewall-cmd", "--reload") if role != :master _s = exec_status("/usr/bin/firewall-cmd", "--add-port", "8080/tcp") end + _s = exec_status("/usr/bin/firewall-cmd", "--add-service", "cluster") + _s = exec_status("/usr/bin/firewall-cmd", "--permanent", "--add-service", "cluster") HANA_FW_SERVICES.each do |service| _s = exec_status("/usr/bin/firewall-cmd", "--add-service", service) _s = exec_status("/usr/bin/firewall-cmd", "--permanent", "--add-service", service) @@ -285,35 +331,38 @@ def config_firewall(role) # Creates the sudoers file def adapt_sudoers - if File.exist?(SapHA::Helpers.data_file_path("SUDOERS_HANASR.erb")) - Helpers.write_file("/etc/sudoers.d/saphanasr.conf",Helpers.render_template("SUDOERS_HANASR.erb", binding)) - end + if File.exist?(SapHA::Helpers.data_file_path("SUDOERS_HANASR.erb")) + Helpers.write_file("/etc/sudoers.d/saphanasr.conf",Helpers.render_template("SUDOERS_HANASR.erb", binding)) + end end # Activates all necessary plugins based on role an scenario def adjust_global_ini(role) # 
@@ -285,35 +331,38 @@
 
     # Creates the sudoers file
     def adapt_sudoers
-    if File.exist?(SapHA::Helpers.data_file_path("SUDOERS_HANASR.erb"))
-      Helpers.write_file("/etc/sudoers.d/saphanasr.conf", Helpers.render_template("SUDOERS_HANASR.erb", binding))
-    end
+      if File.exist?(SapHA::Helpers.data_file_path("SUDOERS_HANASR.erb"))
+        Helpers.write_file("/etc/sudoers.d/saphanasr.conf", Helpers.render_template("SUDOERS_HANASR.erb", binding))
+      end
     end
 
     # Activates all necessary plugins based on role and scenario
     def adjust_global_ini(role)
       # SAPHanaSR is needed on all nodes
-      add_plugin_to_global_ini("SAPHANA_SR")
+      add_plugin_to_global_ini("SAPHANA_SR", @system_id)
       if @additional_instance
         # cost optimized
-        add_plugin_to_global_ini("SUS_COSTOPT") if role != :master
+        add_plugin_to_global_ini("SUS_COSTOPT", @system_id) if role != :master
+        add_plugin_to_global_ini("NON_PROD", @np_system_id) if role != :master
+        command = ["hdbnsutil", "-reloadHADRProviders"]
+        _out, _status = su_exec_outerr_status("#{@np_system_id.downcase}adm", *command)
       else
         # performance optimized
-        add_plugin_to_global_ini("SUS_CHKSRV")
-        add_plugin_to_global_ini("SUS_TKOVER")
+        add_plugin_to_global_ini("SUS_CHKSRV", @system_id)
+        add_plugin_to_global_ini("SUS_TKOVER", @system_id)
       end
       command = ["hdbnsutil", "-reloadHADRProviders"]
-      out, status = su_exec_outerr_status("#{@system_id.downcase}adm", *command)
+      _out, _status = su_exec_outerr_status("#{@system_id.downcase}adm", *command)
     end
 
     # Activates the plugin in global ini
-    def add_plugin_to_global_ini(plugin)
+    def add_plugin_to_global_ini(plugin, sid)
       sr_path = Helpers.data_file_path("GLOBAL_INI_#{plugin}")
       if File.exist?("#{sr_path}.erb")
         sr_path = Helpers.write_var_file(plugin, Helpers.render_template("GLOBAL_INI_#{plugin}.erb", binding))
       end
-      command = ["/usr/sbin/SAPHanaSR-manageProvider", "--add", "--sid", @system_id, sr_path]
-      out, status = su_exec_outerr_status("#{@system_id.downcase}adm", *command)
+      command = ["/usr/sbin/SAPHanaSR-manageProvider", "--add", "--reconfigure", "--sid", sid, sr_path]
+      _out, _status = su_exec_outerr_status("#{sid.downcase}adm", *command)
     end
   end
 end
diff --git a/src/lib/sap_ha/sap_ha_installation.rb b/src/lib/sap_ha/sap_ha_installation.rb
index 76f5587..5f36250 100644
--- a/src/lib/sap_ha/sap_ha_installation.rb
+++ b/src/lib/sap_ha/sap_ha_installation.rb
@@ -57,7 +57,7 @@ def run
       next_node
       log.info "--- #{self.class}.#{__callee__}: finished configuring node #{node[:hostname]} ---"
     end
-    @config.cluster_finalizer.apply(:master)
+    @config.hana.finalize
     @ui.unblock if @ui
     NodeLogger.summary
     :next
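Reviewer note: each HA/DR provider is now registered under the SID that owns it, and `--reconfigure` makes the registration idempotent on re-runs. A sketch of the resulting call flow — the SIDs "HA1" (production) and "QAS" (non-production) are hypothetical, and the stubs only print the commands the real code runs via `su_exec_outerr_status`:

```ruby
def add_plugin(plugin, sid)
  puts "su - #{sid.downcase}adm -c \"/usr/sbin/SAPHanaSR-manageProvider " \
       "--add --reconfigure --sid #{sid} <rendered GLOBAL_INI_#{plugin}>\""
end

def reload_providers(sid)
  puts "su - #{sid.downcase}adm -c \"hdbnsutil -reloadHADRProviders\""
end

def register_providers(additional_instance:, role:)
  add_plugin("SAPHANA_SR", "HA1")        # always, under the production SID
  if additional_instance                 # cost-optimized scenario
    if role != :master
      add_plugin("SUS_COSTOPT", "HA1")
      add_plugin("NON_PROD", "QAS")      # new: limits the non-production DB
      reload_providers("QAS")
    end
  else                                   # performance-optimized scenario
    add_plugin("SUS_CHKSRV", "HA1")
    add_plugin("SUS_TKOVER", "HA1")
  end
  reload_providers("HA1")
end

register_providers(additional_instance: true, role: :slave)
```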
diff --git a/src/lib/sap_ha/semantic_checks.rb b/src/lib/sap_ha/semantic_checks.rb
index a03dbce..bc8c4fa 100644
--- a/src/lib/sap_ha/semantic_checks.rb
+++ b/src/lib/sap_ha/semantic_checks.rb
@@ -22,6 +22,7 @@
 require "sap_ha/exceptions"
 require "yast"
 require "erb"
+require "sap_ha/system/shell_commands"
 
 Yast.import "IP"
 Yast.import "Hostname"
@@ -31,6 +32,7 @@ module SapHA
   class SemanticChecks
     include Singleton
     include Yast::Logger
+    include SapHA::System::ShellCommands
 
     attr_accessor :silent
     attr_reader :checks_passed
@@ -50,6 +52,7 @@ def initialize
     @errors = []
     @checks_passed = true
     @silent = true
+    @no_test = ENV["Y2DIR"].nil?
   end
 
   # Check if the string is a valid IPv4 address
@@ -74,8 +77,8 @@ def ipv4_netmask(value, field_name = "")
   # @param field_name [String] name of the field in the form
   def ipv4_multicast(value, field_name = "")
     flag = Yast::IP.Check4(value) && value.start_with?("239.")
-    msg = "A valid IPv4 multicast address should belong to the 239.* network."
-    report_error(flag, msg, field_name, value)
+    message = "A valid IPv4 multicast address should belong to the 239.* network."
+    report_error(flag, message, field_name, value)
   end
 
   # Check if the IP belongs to the specified network given along with a CIDR netmask
@@ -88,8 +91,8 @@ def ipv4_in_network_cidr(ip, network, field_name = "")
     rescue StandardError
       flag = false
     end
-    msg = "IP address has to belong to the network #{network}."
-    report_error(flag, msg, field_name, ip)
+    message = "IP address has to belong to the network #{network}."
+    report_error(flag, message, field_name, ip)
   end
 
   # Check if the provided IPs belong to the network
@@ -122,14 +125,14 @@ def hostname(value, field_name = "")
   # @param field_name [String] name of the field in the form
   def port(value, field_name = "")
     max_port_number = 65_535
-    msg = "The port number must be in between 1 and #{max_port_number}."
+    message = "The port number must be in between 1 and #{max_port_number}."
     begin
       portn = Integer(value)
       flag = 1 <= portn && portn <= 65_535
     rescue ArgumentError, TypeError
-      return report_error(false, msg, field_name, value)
+      return report_error(false, message, field_name, value)
     end
-    report_error(flag, msg, field_name, value)
+    report_error(flag, message, field_name, value)
   end
 
   # Check if the provided value is a non-negative integer
@@ -264,17 +267,39 @@ def non_empty_string(value, message, field_name, hide_value = false)
     report_error(flag, message || "The value must be a non-empty string", field_name, shown_value)
   end
 
+  # Check if a HANA DB with the given SID is installed on all given nodes
+  def hana_is_installed(value, nodes)
+    flag = true
+    message = ""
+    my_ips = SapHA::System::Network.ip_addresses
+    if @no_test
+      nodes.each do |node|
+        log.debug("node #{node} #{my_ips}")
+        if my_ips.include?(node)
+          status = exec_status("test", "-d", "/usr/sap/#{value.upcase}")
+        else
+          status = exec_status("ssh", "-o", "StrictHostKeyChecking=no", node, "test", "-d", "/usr/sap/#{value.upcase}")
+        end
+        if status != 0
+          flag = false
+          message += "No SAP HANA #{value} is installed on #{node}\n"
+        end
+      end
+    end
+    report_error(flag, message, "SID", value)
+  end
+
   # Check if string is a block device
   # @param value [String] device path
   def block_device(value, field_name)
-    msg = "The provided path does not point to a block device."
+    message = "The provided path does not point to a block device."
    begin
      flag = File::Stat.new(value).blockdev?
    rescue StandardError
      flag = false
    end
    log.error "BLK: #{value}.blockdev? = #{flag}"
-    report_error(flag, msg, field_name, value)
+    report_error(flag, message, field_name, value)
   end
 
   # Start a transactional check
@@ -284,7 +309,7 @@
       false
     else
       true
-      end
+    end
     transaction_begin
     yield self
     return transaction_end if verbosity == :verbose
diff --git a/src/lib/sap_ha/system/local.rb b/src/lib/sap_ha/system/local.rb
index 518ea43..cae7e58 100644
--- a/src/lib/sap_ha/system/local.rb
+++ b/src/lib/sap_ha/system/local.rb
@@ -189,37 +189,6 @@
     def join_cluster(_ip_address)
       raise "Not implemented"
     end
 
-    def config_firewall(fw_config, role)
-      case fw_config
-      when "done"
-        NodeLogger.info("Firewall is already configured")
-      when "off"
-        NodeLogger.info("Firewall will be turned off")
-        systemd_unit(:stop, :service, "firewalld")
-      when "setup"
-        NodeLogger.info("Firewall will be configured for cluster services.")
-        out, status = exec_outerr_status("/usr/bin/firewall-cmd", "--state")
-        return if status.exitstatus != 0
-        out, status = exec_outerr_status("/usr/bin/firewall-cmd", "--add-service", "cluster")
-        NodeLogger.log_status(
-          status.exitstatus == 0,
-          "Open cluster service in firewall",
-          "Could not open cluster service in firewall",
-          out
-        )
-        out, status = exec_outerr_status("/usr/bin/firewall-cmd", "--permanent", "--add-service", "cluster")
-        NodeLogger.log_status(
-          status.exitstatus == 0,
-          "Open cluster service permanent in firewall",
-          "Could not open cluster service permanent in firewall",
-          out
-        )
-        if role != :master
-          exec_status("/usr/bin/firewall-cmd", "--add-port", "8080/tcp")
-        end
-      end
-    end
-
     def change_password(user_name, password)
       cmd_string = "#{user_name}:#{password}"
       out, status = exec_outerr_status_stdin("chpasswd", cmd_string)
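Reviewer note: the new `hana_is_installed` check treats a HANA system as installed when `/usr/sap/<SID>` exists, probing the local node directly and remote nodes over `ssh` (with `StrictHostKeyChecking=no`, so this assumes the key exchange done earlier in the setup). A standalone sketch of the same probe — SID and node IP are hypothetical:

```ruby
# Returns true when /usr/sap/<SID> exists on the given node.
# Kernel#system returns true for exit status 0, false otherwise.
def hana_installed_on?(sid, node, local: false)
  if local
    system("test", "-d", "/usr/sap/#{sid.upcase}")
  else
    system("ssh", "-o", "StrictHostKeyChecking=no", node,
      "test", "-d", "/usr/sap/#{sid.upcase}")
  end
end

puts hana_installed_on?("HA1", "192.168.100.2")
```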
= #{flag}" - report_error(flag, msg, field_name, value) + report_error(flag, message, field_name, value) end # Start a transactional check @@ -284,7 +309,7 @@ def check(verbosity) false else true - end + end transaction_begin yield self return transaction_end if verbosity == :verbose diff --git a/src/lib/sap_ha/system/local.rb b/src/lib/sap_ha/system/local.rb index 518ea43..cae7e58 100644 --- a/src/lib/sap_ha/system/local.rb +++ b/src/lib/sap_ha/system/local.rb @@ -189,37 +189,6 @@ def join_cluster(_ip_address) raise "Not implemented" end - def config_firewall(fw_config, role) - case fw_config - when "done" - NodeLogger.info("Firewall is already configured") - when "off" - NodeLogger.info("Firewall will be turned off") - systemd_unit(:stop, :service, "firewalld") - when "setup" - NodeLogger.info("Firewall will be configured for cluster services.") - out, status = exec_outerr_status("/usr/bin/firewall-cmd", "--state") - return if status.exitstatus != 0 - out, status = exec_outerr_status("/usr/bin/firewall-cmd", "--add-service", "cluster") - NodeLogger.log_status( - status.exitstatus == 0, - "Open cluster service in firewall", - "Could not open cluster service in firewall", - out - ) - out, status = exec_outerr_status("/usr/bin/firewall-cmd", "--permanent", "--add-service", "cluster") - NodeLogger.log_status( - status.exitstatus == 0, - "Open cluster service permanent in firewall", - "Could not open cluster service permanent in firewall", - out - ) - if role != :master - exec_status("/usr/bin/firewall-cmd", "--add-port", "8080/tcp") - end - end - end - def change_password(user_name, password) cmd_string = "#{user_name}:#{password}" out, status = exec_outerr_status_stdin("chpasswd", cmd_string) diff --git a/src/lib/sap_ha/wizard/base_wizard_page.rb b/src/lib/sap_ha/wizard/base_wizard_page.rb index f1fec45..98596da 100644 --- a/src/lib/sap_ha/wizard/base_wizard_page.rb +++ b/src/lib/sap_ha/wizard/base_wizard_page.rb @@ -44,6 +44,7 @@ class BaseWizardPage # Initialize the Wizard page def initialize(model) + textdomain "hana-ha" log.debug "--- #{self.class}.#{__callee__} ---" @model = model end @@ -104,8 +105,12 @@ def main_loop case input # TODO: return only :abort, :cancel and :back from here. If the page needs anything else, # it should redefine the main_loop - when :abort, :back, :cancel, :join_cluster - @model.write_config if input == :abort || input == :cancel + when :abort, :cancel + if Yast::Popup.YesNo(_("Do you realy want to abort?")) + @model.write_config + return input + end + when :back, :join_cluster update_model return input when :next, :summary @@ -239,8 +244,9 @@ def base_popup(message, validator, *widgets) end.params[0] parameters[id] = Yast::UI.QueryWidget(Id(id), :Value) end - log.debug "--- #{self.class}.#{__callee__} popup parameters: #{parameters} ---" + log.debug "--- #{self.class}.#{__callee__} popup parameters: #{parameters} --- #{validator.class} -- #{@model.no_validators}" if validator && !@model.no_validators + log.debug "validator called" ret = SemanticChecks.instance.check_popup(validator, parameters) unless ret.empty? 
diff --git a/src/lib/sap_ha/wizard/hana_page.rb b/src/lib/sap_ha/wizard/hana_page.rb
index eed77fb..214344f 100644
--- a/src/lib/sap_ha/wizard/hana_page.rb
+++ b/src/lib/sap_ha/wizard/hana_page.rb
@@ -142,8 +142,10 @@ def hana_production_constraints_popup(values)
     base_popup(
       "Production system constraints",
       @my_model.method(:production_constraints_validation),
-      MinWidth(20, InputField(Id(:global_alloc_limit), "Global &allocation limit (in MB):",
-        values[:global_alloc_limit] || "")),
+      MinWidth(20, InputField(Id(:global_alloc_limit_prod), "Productive DB global allocation limit (in MB):",
+        values[:global_alloc_limit_prod] || "")),
+      MinWidth(20, InputField(Id(:global_alloc_limit_non), "Non-productive DB global allocation limit (in MB):",
+        values[:global_alloc_limit_non] || "")),
       MinWidth(20, InputField(Id(:preload_column_tables), "&Preload column tables:",
        values[:preload_column_tables] || ""))
     )
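Reviewer note: the popup field IDs become the hash keys that `production_constraints_validation` inspects, so a limit left at its default `"0"` now fails validation instead of being silently accepted (bsc#1216651). An end-to-end sketch of that outcome, with hypothetical values:

```ruby
# Simulates the parameters collected from the constraints popup.
params = {
  global_alloc_limit_prod: "49152",
  global_alloc_limit_non:  "0",
  preload_column_tables:   "false"
}

errors = []
errors << "Global allocation limit of the production system must be adapted." if params[:global_alloc_limit_prod] == "0"
errors << "Global allocation limit of the non-production system must be adapted." if params[:global_alloc_limit_non] == "0"

p errors  # => only the non-production limit is flagged
```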