Skip to content

Commit

Permalink
Merge pull request #4255 from esl/migration-6.2.1
Browse files Browse the repository at this point in the history
Update CETS node discovery and DB migrations
  • Loading branch information
JanuszJakubiec authored Apr 9, 2024
2 parents f67e8f6 + cb92b6a commit 5272f6b
Show file tree
Hide file tree
Showing 9 changed files with 138 additions and 81 deletions.
131 changes: 73 additions & 58 deletions big_tests/tests/cets_disco_SUITE.erl
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ file_cases() ->

rdbms_cases() ->
[rdbms_backend,
rdbms_backend_supports_cluster_change,
rdbms_backend_supports_auto_cleaning,
rdbms_backend_node_doesnt_remove_itself,
rdbms_backend_db_queries,
Expand Down Expand Up @@ -71,7 +72,7 @@ end_per_testcase(Name, Config) when Name == address_please_returns_ip;
Name == address_please_returns_ip_127_0_0_1_from_db ->
stop_cets_discovery(),
Config;
end_per_testcase(_CaseName, Config) ->
end_per_testcase(_CaseName, _Config) ->
unmock(mim()),
unmock(mim2()).

Expand All @@ -91,21 +92,33 @@ rdbms_backend(_Config) ->
Opts1 = #{cluster_name => CN, node_name_to_insert => <<"test1">>},
Opts2 = #{cluster_name => CN, node_name_to_insert => <<"test2">>},

State1 = disco_init(mim(), Opts1),
{{ok, Nodes1_2}, State1_2} = disco_get_nodes(mim(), State1),
?assertMatch(#{last_query_info := #{already_registered := false}}, State1_2),
?assertEqual([], Nodes1_2),
init_and_get_nodes(mim(), Opts1, []),

%% "test2" node can see "test1" on initial registration
State2 = disco_init(mim2(), Opts2),
{{ok, Nodes2_2}, State2_2} = disco_get_nodes(mim2(), State2),
?assertMatch(#{last_query_info := #{already_registered := false}}, State2_2),
?assertEqual([test1], Nodes2_2),
State2 = init_and_get_nodes(mim2(), Opts2, [test1]),

%% "test2" node can see "test1" on update
{{ok, Nodes2_3}, State2_3} = disco_get_nodes(mim2(), State2_2),
?assertEqual(lists:sort([test1, test2]), lists:sort(Nodes2_3)),
?assertMatch(#{last_query_info := #{already_registered := true}}, State2_3).
get_nodes(mim2(), State2, [test1, test2]).

%% Verify that a node can re-register under a different cluster name and
%% that discovery afterwards reports only the nodes of each cluster.
rdbms_backend_supports_cluster_change(_Config) ->
    OldCluster = random_cluster_name(?FUNCTION_NAME),
    NewCluster = <<OldCluster/binary, "_new">>,
    OptsA = #{cluster_name => OldCluster, node_name_to_insert => <<"test1">>},
    OptsB = #{cluster_name => OldCluster, node_name_to_insert => <<"test2">>},

    %% Both nodes register in the old cluster and discover each other
    StateA = init_and_get_nodes(mim(), OptsA, []),
    StateB = init_and_get_nodes(mim2(), OptsB, [test1]),
    get_nodes(mim(), StateA, [test1, test2]),

    %% test1 re-registers under the new cluster; each node now sees only itself
    StateA2 = init_and_get_nodes(mim(), OptsA#{cluster_name := NewCluster}, []),
    get_nodes(mim2(), StateB, [test2]),
    StateA3 = get_nodes(mim(), StateA2, [test1]),

    %% test2 follows into the new cluster; both nodes see each other again
    init_and_get_nodes(mim2(), OptsB#{cluster_name := NewCluster}, [test1]),
    get_nodes(mim(), StateA3, [test1, test2]).

rdbms_backend_supports_auto_cleaning(_Config) ->
Timestamp = month_ago(),
Expand All @@ -115,24 +128,17 @@ rdbms_backend_supports_auto_cleaning(_Config) ->
Opts2 = #{cluster_name => CN, node_name_to_insert => <<"test2">>},

%% test1 row is written with an old (mocked) timestamp
State1 = disco_init(mim(), Opts1),
{{ok, Nodes1_2}, State1_2} = disco_get_nodes(mim(), State1),
{{ok, Nodes1_3}, State1_3} = disco_get_nodes(mim(), State1_2),
?assertEqual([], Nodes1_2),
?assertEqual([test1], Nodes1_3),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_2),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_3),
State1 = init_and_get_nodes(mim(), Opts1, []),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1),
State1A = get_nodes(mim(), State1, [test1]),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1A),

%% test2 would clean test1 registration
%% We don't mock on mim2 node, so timestamps would differ
State2 = disco_init(mim2(), Opts2),
{{ok, Nodes2_2}, State2_2} = disco_get_nodes(mim2(), State2),
?assertEqual([], Nodes2_2),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, [<<"test1">>]}}},
State2_2),
{{ok, Nodes2_3}, State2_3} = disco_get_nodes(mim2(), State2),
?assertEqual([test2], Nodes2_3),
#{last_query_info := #{last_rows := SelectedRows}} = State2_3,
State2 = init_and_get_nodes(mim2(), Opts2, []),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, [<<"test1">>]}}}, State2),
State2A = get_nodes(mim2(), State2, [test2]),
#{last_query_info := #{last_rows := SelectedRows}} = State2A,
?assertMatch(1, length(SelectedRows)).

rdbms_backend_node_doesnt_remove_itself(_Config) ->
Expand All @@ -143,49 +149,45 @@ rdbms_backend_node_doesnt_remove_itself(_Config) ->
Opts2 = #{cluster_name => CN, node_name_to_insert => <<"test2">>},

%% test1 row is written with an old (mocked) timestamp
State1 = disco_init(mim(), Opts1),
{{ok, Nodes1_2}, State1_2} = disco_get_nodes(mim(), State1),
?assertEqual([], Nodes1_2),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_2),
State1 = init_and_get_nodes(mim(), Opts1, []),
?assertMatch(#{last_query_info := #{timestamp := Timestamp}}, State1),

unmock_timestamp(mim()),
%% test1 row is not removed and timestamp is updated
{{ok, Nodes1_3}, State1_3} = disco_get_nodes(mim(), State1_2),
?assertNotMatch(#{last_query_info := #{timestamp := Timestamp}}, State1_3),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}},
State1_3),
?assertEqual([test1], Nodes1_3),
State1A = get_nodes(mim(), State1, [test1]),
?assertNotMatch(#{last_query_info := #{timestamp := Timestamp}}, State1A),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}}, State1A),

State2 = disco_init(mim2(), Opts2),
{{ok, Nodes2_2}, State2_2} = disco_get_nodes(mim2(), State2),
?assertEqual([test1], Nodes2_2),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}},
State2_2).
State2 = init_and_get_nodes(mim2(), Opts2, [test1]),
?assertMatch(#{last_query_info := #{run_cleaning_result := {removed, []}}}, State2).

rdbms_backend_db_queries(_Config) ->
CN = random_cluster_name(?FUNCTION_NAME),
TS = rpc(mim(), mongoose_rdbms_timestamp, select, []),
TS2 = TS + 100,

%% insertion fails if node name or node num is already added for the cluster
?assertEqual({updated, 1}, insert_new(CN, <<"test1">>, 1, <<>>, TS)),
?assertMatch({error, _}, insert_new(CN, <<"test1">>, 1, <<>>, TS)),
?assertMatch({error, _}, insert_new(CN, <<"test1">>, 2, <<>>, TS)),
?assertMatch({error, _}, insert_new(CN, <<"test2">>, 1, <<>>, TS)),
?assertEqual({updated, 1}, insert_new(CN, <<"test2">>, 2, <<>>, TS)),
?assertEqual({updated, 1}, insert_new(CN, <<"testA">>, 1, <<>>, TS)),
?assertMatch({error, _}, insert_new(CN, <<"testA">>, 1, <<>>, TS)),
?assertMatch({error, _}, insert_new(CN, <<"testA">>, 2, <<>>, TS)),
?assertMatch({error, _}, insert_new(CN, <<"testB">>, 1, <<>>, TS)),
?assertEqual({updated, 1}, insert_new(CN, <<"testB">>, 2, <<>>, TS)),

%% insertion fails if node is a member of another cluster
?assertMatch({error, _}, insert_new(<<"my-cluster">>, <<"testA">>, 1, <<>>, TS)),

%% update of the timestamp works correctly
{selected, SelectedNodes1} = select(CN),
?assertEqual(lists:sort([{<<"test1">>, 1, <<>>, TS}, {<<"test2">>, 2, <<>>, TS}]),
?assertEqual(lists:sort([{<<"testA">>, 1, <<>>, TS}, {<<"testB">>, 2, <<>>, TS}]),
lists:sort(SelectedNodes1)),
?assertEqual({updated, 1}, update_existing(CN, <<"test1">>, <<>>, TS2)),
?assertEqual({updated, 1}, update_existing(<<"testA">>, <<>>, TS2)),
{selected, SelectedNodes2} = select(CN),
?assertEqual(lists:sort([{<<"test1">>, 1, <<>>, TS2}, {<<"test2">>, 2, <<>>, TS}]),
?assertEqual(lists:sort([{<<"testA">>, 1, <<>>, TS2}, {<<"testB">>, 2, <<>>, TS}]),
lists:sort(SelectedNodes2)),

%% node removal work correctly
?assertEqual({updated, 1}, delete_node_from_db(CN, <<"test1">>)),
?assertEqual({selected, [{<<"test2">>, 2, <<>>, TS}]}, select(CN)).
%% node removal works correctly
?assertEqual({updated, 1}, delete_node_from_db(<<"testA">>)),
?assertEqual({selected, [{<<"testB">>, 2, <<>>, TS}]}, select(CN)).

rdbms_backend_publishes_node_ip(_Config) ->
%% get_pairs would return only real available nodes, so use the real node names
Expand Down Expand Up @@ -249,6 +251,19 @@ address_please_returns_ip_127_0_0_1_from_db(Config) ->
%% Helpers
%%--------------------------------------------------------------------

%% Initialise the discovery backend on RPCNode and run the first discovery
%% iteration, asserting that it returns ExpectedNodes and that the node was
%% not already registered. Returns the resulting backend state.
init_and_get_nodes(RPCNode, Opts, ExpectedNodes) ->
    InitialState = disco_init(RPCNode, Opts),
    get_nodes(RPCNode, InitialState, ExpectedNodes, false).

%% Run one more discovery iteration for a node that is expected to be
%% already registered (already_registered := true) and assert that it
%% returns ExpectedNodes. Returns the updated backend state.
get_nodes(RPCNode, StateIn, ExpectedNodes) ->
get_nodes(RPCNode, StateIn, ExpectedNodes, true).

%% Run one discovery iteration on RPCNode, assert the returned node list
%% (order-insensitive) and the already_registered flag in last_query_info,
%% and return the new backend state.
get_nodes(RPCNode, StateIn, ExpectedNodes, AlreadyRegistered) ->
    {{ok, ActualNodes}, StateOut} = disco_get_nodes(RPCNode, StateIn),
    ?assertEqual(lists:sort(ExpectedNodes), lists:sort(ActualNodes)),
    ?assertMatch(#{last_query_info := #{already_registered := AlreadyRegistered}}, StateOut),
    StateOut.

disco_init(Node, Opts) ->
State = rpc(Node, mongoose_cets_discovery_rdbms, init, [Opts]),
log_disco_request(?FUNCTION_NAME, Node, Opts, State),
Expand Down Expand Up @@ -311,14 +326,14 @@ select(CN) ->
ct:log("select(~p) = ~p", [CN, Ret]),
Ret.

update_existing(CN, BinNode, Address, TS) ->
Ret = rpc(mim(), mongoose_cets_discovery_rdbms, update_existing, [CN, BinNode, Address, TS]),
ct:log("select(~p, ~p, ~p, ~p) = ~p", [CN, BinNode, Address, TS, Ret]),
%% Update the address and timestamp of an already-registered node row
%% in the discovery table, via RPC to the mim() node. Returns the RDBMS
%% result, e.g. {updated, 1}.
update_existing(BinNode, Address, TS) ->
    Ret = rpc(mim(), mongoose_cets_discovery_rdbms, update_existing, [BinNode, Address, TS]),
    %% Fixed copy-paste defect: the log tag said "select" although this
    %% helper logs a call to update_existing.
    ct:log("update_existing(~p, ~p, ~p) = ~p", [BinNode, Address, TS, Ret]),
    Ret.

delete_node_from_db(CN, BinNode) ->
Ret = rpc(mim(), mongoose_cets_discovery_rdbms, delete_node_from_db, [CN, BinNode]),
ct:log("delete_node_from_db(~p, ~p) = ~p", [CN, BinNode, Ret]),
%% Remove the given node's row from the discovery table, via RPC to the
%% mim() node. Returns the RDBMS result, e.g. {updated, 1}.
delete_node_from_db(BinNode) ->
    Result = rpc(mim(), mongoose_cets_discovery_rdbms, delete_node_from_db, [BinNode]),
    ct:log("delete_node_from_db(~p) = ~p", [BinNode, Result]),
    Result.

start_cets_discovery(Config) ->
Expand Down
5 changes: 2 additions & 3 deletions big_tests/tests/graphql_cets_SUITE.erl
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ all() ->

groups() ->
[{admin_cets_http, [parallel], admin_cets_tests()},
{admin_cets_cli, [parallel], admin_cets_tests()},
{admin_cets_cli, [], admin_cets_tests()},
{domain_admin_cets, [], domain_admin_tests()},
{cets_not_configured, [parallel], cets_not_configured_test()}].

Expand Down Expand Up @@ -251,9 +251,8 @@ register_bad_node() ->
{updated, 1} = rpc(mim(), mongoose_cets_discovery_rdbms, insert_new, InsertArgs).

ensure_bad_node_unregistered() ->
ClusterName = <<"mim">>,
Node = <<"badnode@localhost">>,
DeleteArgs = [ClusterName, Node],
DeleteArgs = [Node],
%% Ensure the node is removed
{updated, _} = rpc(mim(), mongoose_cets_discovery_rdbms, delete_node_from_db, DeleteArgs).

Expand Down
24 changes: 21 additions & 3 deletions priv/migrations/mssql_6.2.0_x.x.x.sql
Original file line number Diff line number Diff line change
@@ -1,15 +1,33 @@
-- Update roster schema
DROP INDEX i_rosteru_server_user_jid ON rosterusers;
DROP INDEX i_rosteru_server_user ON rosterusers;
DROP INDEX i_rosteru_jid ON rosterusers;
ALTER TABLE rosterusers
DROP CONSTRAINT rosterusers$i_rosteru_server_user_jid;
ALTER TABLE rosterusers
ADD CONSTRAINT PK_rosterusers PRIMARY KEY CLUSTERED (server ASC, username ASC, jid ASC);

DROP INDEX i_rosteru_jid ON rostergroups;
DROP INDEX i_rosterg_server_user_jid ON rostergroups;
ALTER TABLE rostergroups
ALTER COLUMN grp VARCHAR(250) NOT NULL;
ALTER TABLE rostergroups
ALTER COLUMN grp VARCHAR(250),
ADD CONSTRAINT PK_rostergroups PRIMARY KEY CLUSTERED (server ASC, username ASC, jid ASC, grp ASC);

-- Store information whether the message is of type "groupchat" in the user's archive
ALTER TABLE mam_message
ADD is_groupchat smallint NOT NULL DEFAULT 0;

-- Create table for mod_caps
-- NOTE(review): presumably caches entity-capability features keyed by
-- (node, sub_node) — confirm against mod_caps before relying on this.
CREATE TABLE caps (
node varchar(250) NOT NULL,
sub_node varchar(250) NOT NULL,
features text NOT NULL,
PRIMARY KEY (node, sub_node)
);

-- Delete PK constraint before replacing it with a new one
-- MSSQL auto-generates the PK constraint name, so it must be looked up
-- dynamically from INFORMATION_SCHEMA and dropped via dynamic SQL.
DECLARE @pk VARCHAR(max) = (SELECT CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
WHERE TABLE_NAME='discovery_nodes' AND CONSTRAINT_TYPE='PRIMARY KEY');
EXEC('ALTER TABLE discovery_nodes DROP CONSTRAINT ' + @pk);

-- The new primary key is node_name alone (previously it included cluster_name),
-- so this ALTER fails if one node_name appears in several clusters.
-- In case of duplicates, you need to remove stale rows manually or wait for cleanup
ALTER TABLE discovery_nodes ADD PRIMARY KEY (node_name);
12 changes: 12 additions & 0 deletions priv/migrations/mysql_6.2.0_x.x.x.sql
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,15 @@ ALTER TABLE rostergroups MODIFY COLUMN grp VARCHAR(255), ADD PRIMARY KEY(server,
-- Store information whether the message is of type "groupchat" in the user's archive
ALTER TABLE mam_message
ADD COLUMN is_groupchat boolean NOT NULL DEFAULT false;

-- Create table for mod_caps
-- NOTE(review): presumably caches entity-capability features keyed by
-- (node, sub_node) — confirm against mod_caps before relying on this.
CREATE TABLE caps (
node varchar(250) NOT NULL,
sub_node varchar(250) NOT NULL,
features text NOT NULL,
PRIMARY KEY (node, sub_node)
);

-- The new primary key is node_name alone (previously it included cluster_name),
-- so this ALTER fails if one node_name appears in several clusters.
-- In case of duplicates, you need to remove stale rows manually or wait for cleanup
ALTER TABLE discovery_nodes
DROP PRIMARY KEY, ADD PRIMARY KEY (node_name);
12 changes: 12 additions & 0 deletions priv/migrations/pgsql_6.2.0_x.x.x.sql
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,15 @@ ALTER TABLE rostergroups ADD PRIMARY KEY (server, username, jid, grp);
-- Store information whether the message is of type "groupchat" in the user's archive
ALTER TABLE mam_message
ADD COLUMN is_groupchat boolean NOT NULL DEFAULT false;

-- Create table for mod_caps
-- NOTE(review): presumably caches entity-capability features keyed by
-- (node, sub_node) — confirm against mod_caps before relying on this.
CREATE TABLE caps (
node varchar(250) NOT NULL,
sub_node varchar(250) NOT NULL,
features text NOT NULL,
PRIMARY KEY (node, sub_node)
);

-- The new primary key is node_name alone (previously it included cluster_name),
-- so this ALTER fails if one node_name appears in several clusters.
-- In case of duplicates, you need to remove stale rows manually or wait for cleanup
ALTER TABLE discovery_nodes
DROP CONSTRAINT discovery_nodes_pkey, ADD PRIMARY KEY (node_name);
2 changes: 1 addition & 1 deletion priv/mssql2012.sql
Original file line number Diff line number Diff line change
Expand Up @@ -758,7 +758,7 @@ CREATE TABLE discovery_nodes (
node_num INT NOT NULL,
address varchar(250) NOT NULL DEFAULT '', -- empty means we should ask DNS
updated_timestamp BIGINT NOT NULL, -- in seconds
PRIMARY KEY (cluster_name, node_name)
PRIMARY KEY (node_name)
);
CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes(cluster_name, node_num);

Expand Down
2 changes: 1 addition & 1 deletion priv/mysql.sql
Original file line number Diff line number Diff line change
Expand Up @@ -547,7 +547,7 @@ CREATE TABLE discovery_nodes (
node_num INT UNSIGNED NOT NULL,
address varchar(250) NOT NULL DEFAULT '', -- empty means we should ask DNS
updated_timestamp BIGINT NOT NULL, -- in seconds
PRIMARY KEY (cluster_name, node_name)
PRIMARY KEY (node_name)
);
CREATE UNIQUE INDEX i_discovery_nodes_node_num USING BTREE ON discovery_nodes(cluster_name, node_num);

Expand Down
2 changes: 1 addition & 1 deletion priv/pg.sql
Original file line number Diff line number Diff line change
Expand Up @@ -489,7 +489,7 @@ CREATE TABLE discovery_nodes (
node_num INT NOT NULL,
address varchar(250) NOT NULL DEFAULT '', -- empty means we should ask DNS
updated_timestamp BIGINT NOT NULL, -- in seconds
PRIMARY KEY (cluster_name, node_name)
PRIMARY KEY (node_name)
);
CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes USING BTREE(cluster_name, node_num);

Expand Down
Loading

0 comments on commit 5272f6b

Please sign in to comment.