diff --git a/.gitignore b/.gitignore
index 17868e3..7b7ed66 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,4 +15,6 @@ package/yast2-sap-ha-*.tar.bz2
.byebug_history
.vscode/
.vs/
-sap_ha_unattended_install_log.txt
\ No newline at end of file
+sap_ha_unattended_install_log.txt
+
+.tmp/
diff --git a/.solargraph.yml b/.solargraph.yml
deleted file mode 100644
index d362071..0000000
--- a/.solargraph.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-include:
-- "**/*.rb"
-- "/usr/share/YaST2/*.rb"
-exclude:
-- spec/**/*
-- test/**/*
-- vendor/**/*
-- ".bundle/**/*"
-require: []
-domains: []
-reporters:
-- rubocop
-- require_not_found
-plugins: []
-max_files: 5000
diff --git a/.yardopts b/.yardopts
index 3a00f15..22ce944 100644
--- a/.yardopts
+++ b/.yardopts
@@ -1,7 +1 @@
--no-private
---protected
---markup markdown
---output-dir doc/autodocs
---readme README.md
---file doc/yast-sap-ha.md
-src/**/*.rb
diff --git a/Rakefile b/Rakefile
index b48b28c..db32aea 100644
--- a/Rakefile
+++ b/Rakefile
@@ -27,4 +27,3 @@ Yast::Tasks.configuration do |conf|
conf.exclude_files << /test/
conf.exclude_files << /aux/
end
-
diff --git a/data/tmpl_cluster_config.erb b/data/tmpl_cluster_config.erb
deleted file mode 100644
index e3a0e74..0000000
--- a/data/tmpl_cluster_config.erb
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-# defaults
-#
-
-rsc_defaults \
- resource-stickiness="1000" \
- migration-threshold="5000"
-
-op_defaults \
- timeout="600"
-
-#
-# production HANA
-#
-
-primitive rsc_ip_<%= @system_id -%>_HDB<%= @instance -%> ocf:heartbeat:IPaddr2 \
- params \
- ip="<%= @virtual_ip %>" cidr_netmask="<%= @virtual_ip_mask %>" \
- op start timeout="20" op stop timeout="20" \
- op monitor interval="10" timeout="20"
-
-primitive rsc_SAPHanaTopology_<%= @system_id -%>_HDB<%= @instance -%> ocf:suse:SAPHanaTopology \
- params \
- SID="<%= @system_id -%>" \
- InstanceNumber="<%= @instance -%>" \
- op monitor interval="10" timeout="600" \
- op start interval="0" timeout="600" \
- op stop interval="0" timeout="300"
-
-primitive rsc_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> ocf:suse:SAPHana \
- params \
- SID="<%= @system_id -%>" \
- InstanceNumber="<%= @instance -%>" \
- PREFER_SITE_TAKEOVER="<%= @prefer_takeover -%>" \
- AUTOMATED_REGISTER="<%= @auto_register -%>" \
- DUPLICATE_PRIMARY_TIMEOUT="7200" \
- op start interval="0" timeout="3600" \
- op stop interval="0" timeout="3600" \
- op promote interval="0" timeout="3600" \
- op monitor interval="60" role="Master" timeout="700" \
- op monitor interval="61" role="Slave" timeout="700"
-
-ms msl_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> rsc_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> \
- meta clone-max="2" clone-node-max="1" interleave="true"
-
-<% if @global_config.platform == "azure" %>
-
-primitive rsc_nc_<%= @system_id -%>_HDB<%= @instance -%> anything \
- params binfile="/usr/bin/nc" cmdline_options="-l -k 62503" \
- meta resource-stickiness=0 \
- op monitor timeout="20" interval="10" depth="0"
-
-group g_ip_<%= @system_id -%>_HDB<%= @instance -%> rsc_ip_<%= @system_id -%>_HDB<%= @instance -%> rsc_nc_<%= @system_id -%>_HDB<%= @instance -%>
-
-colocation col_saphana_ip_<%= @system_id -%>_HDB<%= @instance -%> 2000: g_ip_<%= @system_id -%>_HDB<%= @instance -%>:Started msl_SAPHana_<%= @system_id -%>_HDB<%= @instance -%>:Master
-
-<% else %>
-
-colocation col_saphana_ip_<%= @system_id -%>_HDB<%= @instance -%> 2000: rsc_ip_<%= @system_id -%>_HDB<%= @instance -%>:Started msl_SAPHana_<%= @system_id -%>_HDB<%= @instance -%>:Master
-
-<% end %>
-
-clone cln_SAPHanaTopology_<%= @system_id -%>_HDB<%= @instance -%> rsc_SAPHanaTopology_<%= @system_id -%>_HDB<%= @instance -%> \
- meta is-managed="true" clone-node-max="1" interleave="true"
-
-order ord_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> Optional: cln_SAPHanaTopology_<%= @system_id -%>_HDB<%= @instance -%> msl_SAPHana_<%= @system_id -%>_HDB<%= @instance -%>
-
-
-<% if @additional_instance %>
-
-#
-# non-production HANA and constraints
-#
-
-primitive rsc_SAP_<%= @np_system_id -%>_HDB<%= @np_instance -%> ocf:heartbeat:SAPDatabase \
- params DBTYPE="HDB" SID="<%= @np_system_id -%>" \
- MONITOR_SERVICES="hdbindexserver|hdbnameserver" \
- op start interval="0" timeout="600" \
- op monitor interval="120" timeout="700" \
- op stop interval="0" timeout="300" \
- meta priority="100"
-
-location loc_<%= @np_system_id -%>_never_<%= primary_host_name -%> \
- rsc_SAP_<%= @np_system_id -%>_HDB<%= @np_instance -%> -inf: <%= primary_host_name -%>
-
-
-colocation col_<%= @np_system_id -%>_never_with_PRDip \
- -inf: rsc_SAP_<%= @np_system_id -%>_HDB<%= @np_instance -%>:Started \
- rsc_ip_<%= @system_id -%>_HDB<%= @instance -%>
-
-
-order ord_<%= @np_system_id -%>_stop_before_PRDpromote inf: \
- rsc_SAP_<%= @np_system_id -%>_HDB<%= @np_instance -%>:stop \
- msl_SAPHana_<%= @system_id -%>_HDB<%= @instance -%>:promote
-
-<% end %>
diff --git a/data/tmpl_srhook.py.erb b/data/tmpl_srhook.py.erb
deleted file mode 100644
index 83cfcc9..0000000
--- a/data/tmpl_srhook.py.erb
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python2
-"""
-Auto-generated HA/DR hook script
-
-"""
-
-dbuser="<%= @hook_script_parameters[:hook_db_user_name] %>"
-dbpwd="<%= @hook_script_parameters[:hook_db_password] %>"
-dbinst="<%= @hook_script_parameters[:hook_db_instance] %>"
-dbport="<%= @hook_script_parameters[:hook_port_number] %>"
-
-stmnt1 = "ALTER SYSTEM ALTER CONFIGURATION ('global.ini','SYSTEM') UNSET ('memorymanager','global_allocation_limit') WITH RECONFIGURE"
-stmnt2 = "ALTER SYSTEM ALTER CONFIGURATION ('global.ini','SYSTEM') UNSET ('system_replication','preload_column_tables') WITH RECONFIGURE"
-
-import os, time, dbapi
-
-from hdb_ha_dr.client import HADRBase, Helper
-
-class srTakeover(HADRBase):
- def __init__(self, *args, **kwargs):
- # delegate construction to base class
- super(srTakeover, self).__init__(*args, **kwargs)
-
- def about(self):
- return {"provider_company" : "SUSE",
- "provider_name" : "srTakeover", # provider name = class name
- "provider_description" : "Replication takeover script to set parameters to default.",
- "provider_version" : "1.0"}
-
- def startup(self, hostname, storage_partition, system_replication_mode, **kwargs):
- self.tracer.debug("enter startup hook; %s" % locals())
- self.tracer.debug(self.config.toString())
- self.tracer.info("leave startup hook")
- return 0
-
- def shutdown(self, hostname, storage_partition, system_replication_mode, **kwargs):
- self.tracer.debug("enter shutdown hook; %s" % locals())
- self.tracer.debug(self.config.toString())
- self.tracer.info("leave shutdown hook")
- return 0
-
- def failover(self, hostname, storage_partition, system_replication_mode, **kwargs):
- self.tracer.debug("enter failover hook; %s" % locals())
- self.tracer.debug(self.config.toString())
- self.tracer.info("leave failover hook")
- return 0
-
- def stonith(self, failingHost, **kwargs):
- self.tracer.debug("enter stonith hook; %s" % locals())
- self.tracer.debug(self.config.toString())
- # e.g. stonith of params["failed_host"]
- # e-g- set vIP active
- self.tracer.info("leave stonith hook")
- return 0
-
- def preTakeover(self, isForce, **kwargs):
- """Pre takeover hook."""
- self.tracer.info("%s.preTakeover method called with isForce=%s" %
- (self.__class__.__name__, isForce))
- if not isForce:
- # run pre takeover code
- # run pre-check, return != 0 in case of error => will abort takeover
- return 0
- else:
- # possible force-takeover only code
- # usually nothing to do here
- return 0
-
- def postTakeover(self, rc, **kwargs):
- """Post takeover hook."""
- self.tracer.info("%s.postTakeover method called with rc=%s" % (self.__class__.__name__, rc))
- if rc == 0:
- # normal takeover succeeded
- conn = dbapi.connect('localhost',dbport,dbuser,dbpwd)
- cursor = conn.cursor()
- cursor.execute(stmnt1)
- cursor.execute(stmnt2)
- return 0
- elif rc == 1:
- # waiting for force takeover
- conn = dbapi.connect('localhost',dbport,dbuser,dbpwd)
- cursor = conn.cursor()
- cursor.execute(stmnt1)
- cursor.execute(stmnt2)
- return 0
- elif rc == 2:
- # error, something went wrong
- return 0
\ No newline at end of file
diff --git a/package/yast2-sap-ha.changes b/package/yast2-sap-ha.changes
index 4c26555..b8b919b 100644
--- a/package/yast2-sap-ha.changes
+++ b/package/yast2-sap-ha.changes
@@ -1,7 +1,25 @@
-------------------------------------------------------------------
-Wed Aug 30 20:16:10 UTC 2023 - Josef Reidinger
+Mon Aug 7 05:13:47 UTC 2023 - Peter Varkoly
-- 5.0.0 (bsc#1185510)
+- Set default value for global_alloc_limit to "0"
+- Fix evaluation of CustOpt settings (bsc#1209204)
+- Remove superfluous BuildRequires: HANA-Firewall
+- 5.0.0 (bsc#1185510)
+
+-------------------------------------------------------------------
+Fri May 26 04:52:05 UTC 2023 - Peter Varkoly
+
+- yast2-sap-ha for the Cost-Opt scenario is not up to date with SR takeover in the best practice guide (bsc#1209204)
+- yast2-sap-ha cannot configure the firewall (bsc#1211027)
+- Rework package structure to use the yast2 defaults
+- New function to get the primary hostname on the master.
+- Fix setting the secondary and primary hostnames for the template
+- Do not enable and start csync2 when installing the package. This is insecure.
+- The hook creation is deprecated and was removed from the wizard and the backend.
+  This functionality is now provided by susCostOpt.py, delivered by SAPHanaSR.
+  Now a key sus__costopt must be created.
+- yast2-sap-ha: csync2 configuration not enabled (bsc#1202112)
+- 4.6.1
-------------------------------------------------------------------
Mon Mar 6 12:58:58 UTC 2023 - Ladislav Slezák
@@ -24,35 +42,54 @@ Thu Feb 23 11:21:43 UTC 2023 - Peter Varkoly
- L3: yast2-sap-ha error - Could not adjust global.ini for the production system
(bsc#1207740)
-- yast2-sap-ha: csync2 configuration not enabled (bsc#1202112)
-- 4.4.1
+- Add csync2 to BuildRequires
+- 4.5.6
-------------------------------------------------------------------
-Thu Dec 29 10:28:37 UTC 2022 - Peter Varkoly
+Thu Dec 29 11:09:12 UTC 2022 - Peter Varkoly
- Clean up Rakefile
-- 1.0.18
-
--------------------------------------------------------------------
-Thu Dec 22 09:47:04 UTC 2022 - Peter Varkoly
-
- Use ruby base64 to replace uuencode/uudecode
(bsc#1206601)
-- 1.0.17
+- yast2-sap-ha: csync2 configuration not enabled (bsc#1202112)
+  Enable csync2 when installing the package. yast2-sap-ha will
+ not be executed on the second node.
+- 4.5.5
-------------------------------------------------------------------
-Tue Sep 6 10:06:08 UTC 2022 - Peter Varkoly
+Fri Sep 9 10:06:08 UTC 2022 - Peter Varkoly
- YaST2 HA Setup for SAP Products - cannot input several instance numbers
(bsc#1202979)
-- 1.0.16
+- 4.5.4
+
+-------------------------------------------------------------------
+Thu Sep 8 07:01:42 UTC 2022 - Michal Filka
+
+- bsc#1203227
+ - replaced fgrep by grep -F
+- 4.5.3
+
+-------------------------------------------------------------------
+Tue Aug 9 08:16:47 UTC 2022 - Peter Varkoly
+
+- yast2-sap-ha: csync2 configuration not enabled (bsc#1202112)
+- 4.5.2
-------------------------------------------------------------------
Mon Jun 13 14:32:45 UTC 2022 - Peter Varkoly
- YaST2 sap_ha tool does not allow digits at the beginning of site names
(bsc#1200427)
-- 1.0.15
+- 4.5.1
+
+-------------------------------------------------------------------
+Tue Jun 7 07:37:53 UTC 2022 - Peter Varkoly
+
+- ruby-xmlrpc is not part of stdlib anymore
+ Update requirement
+- Adapt version to normal yast2 version
+- 4.5.0
-------------------------------------------------------------------
Sun May 22 18:09:27 UTC 2022 - Peter Varkoly
@@ -73,17 +110,17 @@ Fri May 13 12:09:33 UTC 2022 - Peter Varkoly
- 1.0.13
-------------------------------------------------------------------
-Tue May 3 13:58:15 UTC 2022 - Peter Varkoly
+Tue May 3 13:56:48 UTC 2022 - Peter Varkoly
-- softdog missing in Yast while configuring HA for SAP Products
- (bsc#1199029)
+- Adapt ntp tests
+- softdog missing in Yast while configuring HA for SAP Products
+  (bsc#1199029)
- 1.0.12
-------------------------------------------------------------------
Tue Dec 7 15:12:47 UTC 2021 - Peter Varkoly
- kmod-compat has broken dependencies (bsc#1186618)
- Update requirement
- 1.0.11
-------------------------------------------------------------------
@@ -93,6 +130,7 @@ Tue Oct 12 16:08:26 UTC 2021 - Peter Varkoly
(bsc#1190774)
Add SAPHanaSR via global.ini as proposed in
https://documentation.suse.com/sbp/all/html/SLES4SAP-hana-sr-guide-PerfOpt-15/index.html#id-1.10.6.6"
+- Remove HANA takeover hook script. This is not necessary for HANA 2 any more.
- 1.0.10
-------------------------------------------------------------------
diff --git a/package/yast2-sap-ha.spec b/package/yast2-sap-ha.spec
index 1fe93f9..314e75b 100644
--- a/package/yast2-sap-ha.spec
+++ b/package/yast2-sap-ha.spec
@@ -1,7 +1,7 @@
#
# spec file for package yast2-sap-ha
#
-# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2023 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -12,49 +12,48 @@
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
-# Please submit bugfixes or comments via http://bugs.opensuse.org/
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
Name: yast2-sap-ha
Version: 5.0.0
Release: 0
-
BuildArch: noarch
-
Source0: %{name}-%{version}.tar.bz2
Source1: yast2-sap-ha-rpmlintrc
+Requires: conntrack-tools
+Requires: corosync
+Requires: corosync-qdevice
+Requires: crmsh
+Requires: csync2
+Requires: hawk2
+Requires: pacemaker
+Requires: rubygem(%{rb_default_ruby_abi}:xmlrpc)
Requires: yast2
+Requires: yast2-cluster >= 4.3.8
Requires: yast2-ruby-bindings
-Requires: csync2
-Requires: corosync
+Requires: yast2-ntp-client
# for opening URLs
Requires: xdg-utils
# for handling the SSH client
Requires: expect
+Requires: firewalld
Requires: openssh
-Requires: yast2-cluster
-Requires: yast2-ntp-client
-# for lsblk
+Requires: HANA-Firewall >= 2.0.3
Requires: util-linux
-# lsmod, modprobe
Requires: SAPHanaSR
Requires: kmod
-# configuration parser
-Requires: augeas-lenses
-Requires: rubygem(%{rb_default_ruby_abi}:cfa)
# for pidof
Requires: sysvinit-tools
-# xmlrpc was removed from stdlib
-%if 0%{?suse_version} >= 1540
-Requires: rubygem(%{rb_default_ruby_abi}:xmlrpc)
-BuildRequires: rubygem(%{rb_default_ruby_abi}:xmlrpc)
-%endif
-BuildRequires: augeas-lenses
BuildRequires: csync2
+BuildRequires: firewalld
BuildRequires: kmod
+BuildRequires: rubygem(%{rb_default_ruby_abi}:rspec)
+BuildRequires: rubygem(%{rb_default_ruby_abi}:xmlrpc)
+BuildRequires: rubygem(%{rb_default_ruby_abi}:yast-rake)
BuildRequires: sysvinit-tools
BuildRequires: update-desktop-files
BuildRequires: util-linux
@@ -64,19 +63,15 @@ BuildRequires: yast2-devtools
BuildRequires: yast2-ntp-client
BuildRequires: yast2-packager
BuildRequires: yast2-ruby-bindings
-BuildRequires: rubygem(%{rb_default_ruby_abi}:cfa)
-BuildRequires: rubygem(%{rb_default_ruby_abi}:rspec)
-BuildRequires: rubygem(%{rb_default_ruby_abi}:yast-rake)
Summary: SUSE High Availability Setup for SAP Products
-License: GPL-2.0
+License: GPL-2.0-only
Group: System/YaST
-Url: http://www.suse.com
+URL: http://www.suse.com
%description
-A YaST2 module to enable high availability for SAP HANA and SAP NetWeaver installations.
+A YaST2 module to enable high availability for SAP HANA installations.
%prep
-%define augeas_dir %{_datarootdir}/augeas/lenses/dist
%setup -n %{name}-%{version}
%check
@@ -85,26 +80,11 @@ rake test:unit
%build
%install
-mkdir -p %{buildroot}%{yast_dir}/data/sap_ha/
mkdir -p %{buildroot}%{yast_vardir}/sap_ha/
-mkdir -p %{yast_scrconfdir}
-mkdir -p %{buildroot}%{augeas_dir}
rake install DESTDIR="%{buildroot}"
-# wizard help files
-install -m 644 data/*.html %{buildroot}%{yast_dir}/data/sap_ha/
-# ruby templates
-install -m 644 data/*.erb %{buildroot}%{yast_dir}/data/sap_ha/
-# HA scenarios definitions
-install -m 644 data/scenarios.yaml %{buildroot}%{yast_dir}/data/sap_ha/
-# SSH invocation wrapper
-install -m 755 data/check_ssh.expect %{buildroot}%{yast_dir}/data/sap_ha/
-# Augeas lens for SAP INI files
-install -m 644 data/sapini.aug %{buildroot}%{augeas_dir}
%post
-/usr/bin/systemctl enable csync2.socket
-/usr/bin/systemctl start csync2.socket
%files
%defattr(-,root,root)
@@ -115,6 +95,6 @@ install -m 644 data/sapini.aug %{buildroot}%{augeas_dir}
%{yast_dir}/data/sap_ha/
%{yast_vardir}/sap_ha/
%{yast_scrconfdir}/*.scr
-%{augeas_dir}/sapini.aug
+%{yast_ybindir}
%changelog
diff --git a/pry_debug.rb b/pry_debug.rb
index 3abda2b..b002168 100644
--- a/pry_debug.rb
+++ b/pry_debug.rb
@@ -1,25 +1,25 @@
-require 'pry'
-require 'yaml'
+require "pry"
+require "yaml"
-ENV['Y2DIR'] = File.expand_path('../src', __FILE__)
+ENV["Y2DIR"] = File.expand_path("../src", __FILE__)
-require_relative 'test/test_helper'
+require_relative "test/test_helper"
-require_relative 'src/lib/sap_ha/configuration.rb'
-require 'sap_ha/system/ssh'
-require 'sap_ha/system/local'
-require 'sap_ha/semantic_checks'
-require 'sap_ha/helpers'
+require_relative "src/lib/sap_ha/configuration.rb"
+require "sap_ha/system/ssh"
+require "sap_ha/system/local"
+require "sap_ha/semantic_checks"
+require "sap_ha/helpers"
-require_relative 'src/clients/sap_ha.rb'
+require_relative "src/clients/sap_ha.rb"
def sequence
seq = Yast::SAPHA.sequence
- print 'product_check -> '
+ print "product_check -> "
current_key = "scenario_selection"
- while true
+ loop do
print current_key
- print ' -> '
+ print " -> "
current_key = seq[current_key][:next]
break if current_key.nil?
end
@@ -38,27 +38,27 @@ def rpc(node)
XMLRPC::Client.new(ip, "/RPC2", 8080)
end
-def process
+def process
Yast::SSH.instance.run_rpc_server("192.168.103.22")
sleep 3
c = prepare_hana_config
y = c.dump(true)
s = rpc(:hana2)
- s.call('sapha.import_config', y)
- s.call('sapha.config.start_setup')
+ s.call("sapha.import_config", y)
+ s.call("sapha.config.start_setup")
for component_id in c.components
puts "--- configuring component #{component_id} ---"
func = "sapha.config_#{component_id.to_s[1..-1]}.bogus_apply"
s.call(func)
end
- s.call('sapha.config.end_setup')
- puts s.call('sapha.config.collect_log')
- s.call('sapha.shutdown')
+ s.call("sapha.config.end_setup")
+ puts s.call("sapha.config.collect_log")
+ s.call("sapha.shutdown")
# s.call('system.listMethods')
end
def read_config
- YAML.load(File.read('config.yml'))
+ YAML.load(File.read("config.yml"))
end
def create_sequence(conf)
@@ -75,7 +75,7 @@ def create_sequence(conf)
ntp: "ntp",
next: "installation",
back: :back
- } if current == 'config_overview'
+ } if current == "config_overview"
{
back: :back,
abort: :abort,
@@ -85,99 +85,95 @@ def create_sequence(conf)
}
end
seq = {}
- pages_seq = conf.scenario['screen_sequence']
+ pages_seq = conf.scenario["screen_sequence"]
(0...pages_seq.length).each do |ix|
- seq['ws_start'] = pages_seq[0] if ix == 0
- seq[pages_seq[ix]] = make_entry.call(pages_seq[ix], pages_seq[ix+1])
+ seq["ws_start"] = pages_seq[0] if ix == 0
+ seq[pages_seq[ix]] = make_entry.call(pages_seq[ix], pages_seq[ix + 1])
end
seq
end
c = SapHA::HAConfiguration.new
c = prepare_hana_config(c, notest: true, transport_mode: :multicast)
-c.cluster.nodes[:node1][:ip_ring1] = '192.168.101.1'
-c.cluster.nodes[:node1][:ip_ring2] = '192.168.103.1'
+c.cluster.nodes[:node1][:ip_ring1] = "192.168.101.1"
+c.cluster.nodes[:node1][:ip_ring2] = "192.168.103.1"
r = rpc :hana1
y = c.dump(true)
-require_relative 'src/lib/sap_ha/system/connectivity'
-h = SapHA::System::Host.new('hana01', ['192.168.103.21'])
-
+require_relative "src/lib/sap_ha/system/connectivity"
+h = SapHA::System::Host.new("hana01", ["192.168.103.21"])
def cccccc
@config = SapHA::HAConfiguration.new
@config.set_product_id "HANA"
- @config.set_scenario_name 'Scale Up: Performance-optimized'
+ @config.set_scenario_name "Scale Up: Performance-optimized"
+ @config.cluster.import(
+ number_of_rings: 2,
+ transport_mode: :unicast,
+ # transport_mode: :multicast,
+ cluster_name: "hana_sysrep",
+ expected_votes: 2,
+ rings: {
+ ring1: {
+ address: "192.168.101.0",
+ port: "5405",
+ id: 1,
+ mcast: ""
+ # mcast: '239.0.0.1'
+ },
+ ring2: {
+ address: "192.168.103.0",
+ port: "5405",
+ id: 2,
+ mcast: ""
+ # mcast: '239.0.0.2'
+ }
+ }
+ )
@config.cluster.import(
- number_of_rings: 2,
- transport_mode: :unicast,
- # transport_mode: :multicast,
- cluster_name: 'hana_sysrep',
- expected_votes: 2,
- rings: {
- ring1: {
- address: '192.168.101.0',
- port: '5405',
- id: 1,
- mcast: ''
- # mcast: '239.0.0.1'
- },
- ring2: {
- address: '192.168.103.0',
- port: '5405',
- id: 2,
- mcast: ''
- # mcast: '239.0.0.2'
- }
- }
- )
- @config.cluster.import(
- number_of_rings: 2,
- number_of_nodes: 2,
- nodes: {
- node1: {
- host_name: "hana01",
- ip_ring1: "192.168.101.21",
- ip_ring2: "192.168.103.21",
- node_id: '1'
- },
- node2: {
- host_name: "hana02",
- ip_ring1: "192.168.101.22",
- ip_ring2: "192.168.103.22",
- node_id: '2'
- }
- }
- )
- @config.fencing.import(devices: [{ name: '/dev/vdb', type: 'disk', uuid: '' }])
- @config.watchdog.import(to_install: ['softdog'])
- @config.hana.import(
- system_id: 'XXX',
- instance: '00',
- virtual_ip: '192.168.101.100',
- backup_user: 'xxxadm'
- )
- ntp_cfg = {
- "synchronize_time" => false,
- "sync_interval" => 5,
- "start_at_boot" => true,
- "start_in_chroot" => false,
- "ntp_policy" => "auto",
- "restricts" => [],
- "peers" => [
- { "type" => "server",
- "address" => "ntp.local",
- "options" => " iburst",
- "comment" => "# key (6) for accessing server variables\n"
- }
- ]
+ number_of_rings: 2,
+ number_of_nodes: 2,
+ nodes: {
+ node1: {
+ host_name: "hana01",
+ ip_ring1: "192.168.101.21",
+ ip_ring2: "192.168.103.21",
+ node_id: "1"
+ },
+ node2: {
+ host_name: "hana02",
+ ip_ring1: "192.168.101.22",
+ ip_ring2: "192.168.103.22",
+ node_id: "2"
}
- @config.ntp.read_configuration
- return @config
+ }
+ )
+ @config.fencing.import(devices: [{ name: "/dev/vdb", type: "disk", uuid: "" }])
+ @config.watchdog.import(to_install: ["softdog"])
+ @config.hana.import(
+ system_id: "XXX",
+ instance: "00",
+ virtual_ip: "192.168.101.100",
+ backup_user: "xxxadm"
+ )
+ ntp_cfg = {
+ "synchronize_time" => false,
+ "sync_interval" => 5,
+ "start_at_boot" => true,
+ "start_in_chroot" => false,
+ "ntp_policy" => "auto",
+ "restricts" => [],
+ "peers" => [
+ { "type" => "server",
+ "address" => "ntp.local",
+ "options" => " iburst",
+ "comment" => "# key (6) for accessing server variables\n" }
+ ]
+ }
+ @config.ntp.read_configuration
+ return @config
end
c2 = cccccc
-binding.pry
-
puts nil
diff --git a/data/check_ssh.expect b/src/bin/check_ssh.expect
similarity index 100%
rename from data/check_ssh.expect
rename to src/bin/check_ssh.expect
diff --git a/src/clients/sap_ha.rb b/src/clients/sap_ha.rb
index 92cfb3a..a0efbd1 100644
--- a/src/clients/sap_ha.rb
+++ b/src/clients/sap_ha.rb
@@ -21,6 +21,7 @@
require "yast"
require "psych"
+require "sap_ha/configuration"
require "sap_ha/helpers"
require "sap_ha/node_logger"
require "sap_ha/wizard/cluster_nodes_page"
@@ -36,7 +37,6 @@
require "sap_ha/wizard/list_selection"
require "sap_ha/wizard/rich_text"
require "sap_ha/wizard/scenario_selection_page"
-require "sap_ha/configuration"
# YaST module
module Yast
@@ -47,6 +47,7 @@ class SAPHAClass < Client
Yast.import "UI"
Yast.import "Wizard"
Yast.import "Sequencer"
+ Yast.import "Service"
Yast.import "Popup"
include Yast::UIShortcuts
include Yast::Logger
@@ -54,7 +55,7 @@ class SAPHAClass < Client
def initialize
log.warn "--- called #{self.class}.#{__callee__}: CLI arguments are #{WFM.Args} ---"
- begin
+ begin
if WFM.Args.include?("readconfig")
ix = WFM.Args.index("readconfig") + 1
begin
@@ -65,7 +66,7 @@ def initialize
@config.imported = true
if WFM.Args.include?("unattended")
@config.unattended = true
- end
+ end
else
@config = SapHA::HAConfiguration.new
end
@@ -189,7 +190,7 @@ def initialize
next: :ws_finish
}
}
-
+
@unattended_sequence = {
"ws_start" => "product_check",
"product_check" => {
@@ -214,7 +215,6 @@ def initialize
}
}
-
@aliases = {
"product_check" => -> { product_check },
"file_import_check" => -> { file_import_check },
@@ -235,7 +235,7 @@ def initialize
"config_overview" => -> { configuration_overview },
"summary" => -> { show_summary }
}
- end
+ end
end
def main
@@ -245,32 +245,32 @@ def main
Wizard.CreateDialog
Wizard.SetDialogTitle("HA Setup for SAP Products")
begin
- if @config.unattended
- Sequencer.Run(@aliases, @unattended_sequence)
+ if @config.unattended
+ Sequencer.Run(@aliases, @unattended_sequence)
else
- Sequencer.Run(@aliases, @sequence)
+ Sequencer.Run(@aliases, @sequence)
end
rescue StandardError => e
# FIXME: y2start overrides the return code, therefore exit prematurely without
# shutting down Yast properly, see bsc#1099871
# If the error was not catched until here, we know that is a unattended installation.
# exit!(1)
- @unattended_error = "Error occurred during the unattended installation: #{e.message}"
- log.error @unattended_error
+ @unattended_error = "Error occurred during the unattended installation: #{e.message}"
+ log.error @unattended_error
puts @unattended_error
Popup.TimedError(@unattended_error, 10)
ensure
Wizard.CloseDialog
if @config.unattended
- if @unattended_error.nil?
- success = SapHA::Helpers.write_file("/var/log/YaST2/sap_ha_unattended_install_log.txt", SapHA::NodeLogger.text)
+ if @unattended_error.nil?
+ SapHA::Helpers.write_file("/var/log/YaST2/sap_ha_unattended_install_log.txt", SapHA::NodeLogger.text)
log.info "Execution Finished: Please, verify the log /var/log/YaST2/sap_ha_unattended_install_log.txt"
- # FIXME: yast redirects stdout, therefore the usage of the CommanlineClass is needed to write on the stdout, but as the
- # the dependent modules we have (cluster, firewall, ntp) demands UI existence, we cannot call the module without creating the UI object.
+          # FIXME: yast redirects stdout, therefore the CommandLine class is needed to write to stdout, but as
+          # the dependent modules we have (cluster, firewall, ntp) demand UI existence, we cannot call the module without creating the UI object.
# The best option is to presente a Timed Popup to the user.
Popup.TimedMessage("Execution Finished: Please, verify the log /var/log/YaST2/sap_ha_unattended_install_log.txt", 10)
end
- end
+ end
end
end
@@ -288,25 +288,23 @@ def product_check
return :unknown
end
# TODO: here we should check if the symbol can be handled by th
- #stat = Yast::Cluster.LoadClusterConfig
- #Yast::Cluster.load_csync2_confe Sequencer
+ # stat = Yast::Cluster.LoadClusterConfig
+ # Yast::Cluster.load_csync2_confe Sequencer
@config.product.fetch("id", "abort").downcase.to_sym
end
def file_import_check
- begin
- log.debug "--- called #{self.class}.#{__callee__} ---"
- SapHA::SAPHAUnattendedInstall.new(@config).check_config
- rescue StandardError => e
- if @config.unattended
- # Will be trated by the caller to collect the log.
- raise e
- else
- # Adjust the WF to show the Summary with the problems.
- return :unknown
- end
+ log.debug "--- called #{self.class}.#{__callee__} ---"
+ SapHA::SAPHAUnattendedInstall.new(@config).check_config
+ rescue StandardError => e
+ if @config.unattended
+      # Will be treated by the caller to collect the log.
+ raise e
+ else
+ # Adjust the WF to show the Summary with the problems.
+ return :unknown
end
- end
+ end
def scenario_selection
log.debug "--- called #{self.class}.#{__callee__} ---"
@@ -436,6 +434,9 @@ def run_unattended_install
def show_summary
log.debug "--- called #{self.class}.#{__callee__} ---"
+ if File.exist?(SapHA::Helpers.var_file_path("need_to_start_firewalld"))
+ Service.Start("firewalld")
+ end
SapHA::Wizard::SetupSummaryPage.new(@config).run
end
diff --git a/src/clients/sap_ha_rpc.rb b/src/clients/sap_ha_rpc.rb
index 006463f..18b2760 100644
--- a/src/clients/sap_ha_rpc.rb
+++ b/src/clients/sap_ha_rpc.rb
@@ -19,24 +19,22 @@
# Summary: SUSE High Availability Setup for SAP Products: XML RPC Server Client
# Authors: Ilya Manyugin
-require 'yast'
-require 'sap_ha/rpc_server'
+require "yast"
+require "sap_ha/rpc_server"
module Yast
# An XML RPC Yast Client
class SapHARPCClass < Client
-
def initialize
@server = SapHA::RPCServer.new
at_exit { @server.shutdown }
end
-
+
def main
# the following call blocks
@server.start
# when .shutdown is called
@server.close_port
- Yast::SuSEFirewall.ActivateConfiguration
end
end
diff --git a/src/data/sap_ha/GLOBAL_INI_SAPHANA_SR b/src/data/sap_ha/GLOBAL_INI_SAPHANA_SR
new file mode 100644
index 0000000..6ddde79
--- /dev/null
+++ b/src/data/sap_ha/GLOBAL_INI_SAPHANA_SR
@@ -0,0 +1,7 @@
+[ha_dr_provider_saphanasr]
+provider = SAPHanaSR
+path = /usr/share/SAPHanaSR/
+execution_order = 1
+
+[trace]
+ha_dr_saphanasr = info
diff --git a/src/data/sap_ha/GLOBAL_INI_SUS_CHKSRV b/src/data/sap_ha/GLOBAL_INI_SUS_CHKSRV
new file mode 100644
index 0000000..f9bb4b6
--- /dev/null
+++ b/src/data/sap_ha/GLOBAL_INI_SUS_CHKSRV
@@ -0,0 +1,8 @@
+[ha_dr_provider_suschksrv]
+provider = susChkSrv
+path = /usr/share/SAPHanaSR/
+execution_order = 3
+action_on_lost = stop
+
+[trace]
+ha_dr_suschksrv = info
diff --git a/src/data/sap_ha/GLOBAL_INI_SUS_COSTOPT.erb b/src/data/sap_ha/GLOBAL_INI_SUS_COSTOPT.erb
new file mode 100644
index 0000000..a9e6414
--- /dev/null
+++ b/src/data/sap_ha/GLOBAL_INI_SUS_COSTOPT.erb
@@ -0,0 +1,14 @@
+[memorymanager]
+global_allocation_limit = <%= @production_constraints[:global_alloc_limit] -%>
+
+[system_replication]
+preload_column_tables = <%= @production_constraints[:preload_column_tables] -%>
+
+[ha_dr_provider_suscostopt]
+provider = susCostOpt
+path = /usr/share/SAPHanaSR/
+userkey = sus_<%= @system_id -%>_costopt
+execution_order = 2
+
+[trace]
+ha_dr_suscostopt = info
diff --git a/src/data/sap_ha/GLOBAL_INI_SUS_TKOVER b/src/data/sap_ha/GLOBAL_INI_SUS_TKOVER
new file mode 100644
index 0000000..c5f2055
--- /dev/null
+++ b/src/data/sap_ha/GLOBAL_INI_SUS_TKOVER
@@ -0,0 +1,7 @@
+[ha_dr_provider_sustkover]
+provider = susTkOver
+path = /usr/share/SAPHanaSR/
+execution_order = 2
+
+[trace]
+ha_dr_sustkover = info
diff --git a/src/data/sap_ha/SUDOERS_HANASR.erb b/src/data/sap_ha/SUDOERS_HANASR.erb
new file mode 100644
index 0000000..e2d4bfe
--- /dev/null
+++ b/src/data/sap_ha/SUDOERS_HANASR.erb
@@ -0,0 +1,9 @@
+# SAPHanaSR-ScaleUp entries for writing srHook cluster attribute
+<%= @system_id -%>adm ALL=(ALL) NOPASSWD: /usr/sbin/crm_attribute -n hana_<%= @system_id -%>_site_srHook_*
+
+# SAPHanaSR-ScaleUp entries for writing srHook cluster attribute
+Cmnd_Alias SOK_SITEA = /usr/sbin/crm_attribute -n hana_<%= @system_id -%>_site_srHook_<%= @site_name_1 -%> -v SOK -t crm_config -s SAPHanaSR
+Cmnd_Alias SFAIL_SITEA = /usr/sbin/crm_attribute -n hana_<%= @system_id -%>_site_srHook_<%= @site_name_1 -%> -v SFAIL -t crm_config -s SAPHanaSR
+Cmnd_Alias SOK_SITEB = /usr/sbin/crm_attribute -n hana_<%= @system_id -%>_site_srHook_<%= @site_name_2 -%> -v SOK -t crm_config -s SAPHanaSR
+Cmnd_Alias SFAIL_SITEB = /usr/sbin/crm_attribute -n hana_<%= @system_id -%>_site_srHook_<%= @site_name_2 -%> -v SFAIL -t crm_config -s SAPHanaSR
+<%= @system_id -%>adm ALL=(ALL) NOPASSWD: SOK_SITEA, SFAIL_SITEA, SOK_SITEB, SFAIL_SITEB
diff --git a/data/help_cluster_nodes.html b/src/data/sap_ha/help_cluster_nodes.html
similarity index 100%
rename from data/help_cluster_nodes.html
rename to src/data/sap_ha/help_cluster_nodes.html
diff --git a/data/help_comm_layer.html b/src/data/sap_ha/help_comm_layer.html
similarity index 68%
rename from data/help_comm_layer.html
rename to src/data/sap_ha/help_comm_layer.html
index 67c5087..addb7ea 100644
--- a/data/help_comm_layer.html
+++ b/src/data/sap_ha/help_comm_layer.html
@@ -20,10 +20,16 @@ Communication Layer
Please make sure that the provided IP addresses correspond to the network addresses of the rings.
Note: the number of nodes for HANA scale-up clusters is fixed and cannot be changed.
- Enable Corosync Secure Authentication:
+
There are 3 possibility for the Firewall configuration:
+
+ - Firewall is configured The firewall on all nodes is already configured. All necessary ports for the cluster and SAP HANA are open. In this case, only port 8080 will be temporarily opened on the slave node during the setup.
+ - Turn off Firewall The firewall will be turned off on all nodes during the setup.
+ - Configure Firewall The firewall will be configured by using HANA-Firewall and by enabling the cluster service.
+
+ Enable Corosync Secure Authentication:
require the HMAC/SHA1 authentication for cluster messages authentication.
This option reduces total throughput of the cluster messaging level and consumes extra CPU cycles for encryption.
Enable csync2: enable the csync2
service that synchronizes cluster-related configuration files between nodes.
-