@@ -33,12 +33,13 @@ end_per_suite(_) ->
     emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]).

 init_per_testcase(t_autocluster_leave = TC, Config) ->
-    [Core1, Core2, Core3, Repl] =
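+    %% Cluster under test: two core nodes and two replicant nodes.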
+    [Core1, Core2, Repl1, Repl2] =
         Nodes = [
             t_autocluster_leave_core1,
             t_autocluster_leave_core2,
-            t_autocluster_leave_core3,
-            t_autocluster_leave_replicant
+            t_autocluster_leave_replicant1,
+            t_autocluster_leave_replicant2
         ],
     NodeNames = [emqx_cth_cluster:node_name(N) || N <- Nodes],
@@ -58,8 +58,8 @@ init_per_testcase(t_autocluster_leave = TC, Config) ->
         [
             {Core1, #{role => core, apps => AppSpec}},
             {Core2, #{role => core, apps => AppSpec}},
-            {Core3, #{role => core, apps => AppSpec}},
-            {Repl, #{role => replicant, apps => AppSpec}}
+            {Repl1, #{role => replicant, apps => AppSpec}},
+            {Repl2, #{role => replicant, apps => AppSpec}}
         ],
         #{work_dir => emqx_cth_suite:work_dir(TC, Config)}
     ),
@@ -307,7 +307,7 @@ t_admin(_Config) ->
     ok.

 t_autocluster_leave(Config) ->
-    [Core1, Core2, Core3, Repl] = Cluster = ?config(cluster, Config),
+    [Core1, Core2, Repl1, Repl2] = Cluster = ?config(cluster, Config),
     %% Mria membership updates are async, makes sense to wait a little
     timer:sleep(300),
     ClusterView = [lists:sort(rpc:call(N, emqx, running_nodes, [])) || N <- Cluster],
@@ -317,24 +317,27 @@ t_autocluster_leave(Config) ->
     ?assertEqual(View1, View3),
     ?assertEqual(View1, View4),

-    rpc:call(Core3, emqx_mgmt_cli, cluster, [["leave"]]),
+    rpc:call(Core2, emqx_mgmt_cli, cluster, [["leave"]]),
     timer:sleep(1000),
-    %% Replicant node may still discover and join Core3 which is now split from [Core1, Core2],
-    %% but it's expected to choose a bigger cluster of [Core1, Core2]..
-    ?assertMatch([Core3], rpc:call(Core3, emqx, running_nodes, [])),
+    %% Replicant nodes can still discover Core2, which is now split from the rest
+    %% of the cluster, but they are expected to ignore it, since mria_lb must
+    %% filter out core nodes that have discovery disabled.
+    ?assertMatch([Core2], rpc:call(Core2, emqx, running_nodes, [])),
     ?assertEqual(undefined, rpc:call(Core1, erlang, whereis, [ekka_autocluster])),
-    ?assertEqual(lists:sort([Core1, Core2, Repl]), rpc:call(Core1, emqx, running_nodes, [])),
-    ?assertEqual(lists:sort([Core1, Core2, Repl]), rpc:call(Core2, emqx, running_nodes, [])),
-    ?assertEqual(lists:sort([Core1, Core2, Repl]), rpc:call(Repl, emqx, running_nodes, [])),
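+    %% The remaining nodes agree on a cluster view that excludes Core2.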
+    ?assertEqual(lists:sort([Core1, Repl1, Repl2]), rpc:call(Core1, emqx, running_nodes, [])),
+    ?assertEqual(lists:sort([Core1, Repl1, Repl2]), rpc:call(Repl1, emqx, running_nodes, [])),
+    ?assertEqual(lists:sort([Core1, Repl1, Repl2]), rpc:call(Repl2, emqx, running_nodes, [])),

-    rpc:call(Repl, emqx_mgmt_cli, cluster, [["leave"]]),
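+    %% Next, a replicant leaves; only Core1 and Repl2 should remain clustered.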
+    rpc:call(Repl1, emqx_mgmt_cli, cluster, [["leave"]]),
     timer:sleep(1000),
-    ?assertEqual(lists:sort([Core1, Core2]), rpc:call(Core1, emqx, running_nodes, [])),
-    ?assertEqual(lists:sort([Core1, Core2]), rpc:call(Core2, emqx, running_nodes, [])),
+    ?assertEqual(lists:sort([Core1, Repl2]), rpc:call(Core1, emqx, running_nodes, [])),

-    rpc:call(Core3, emqx_mgmt_cli, cluster, [["discovery", "enable"]]),
-    rpc:call(Repl, emqx_mgmt_cli, cluster, [["discovery", "enable"]]),
-    %% core nodes will join and restart asyncly, may need more time to re-cluster
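+    %% Re-enable discovery on the two nodes that left so they re-join the cluster.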
+    rpc:call(Core2, emqx_mgmt_cli, cluster, [["discovery", "enable"]]),
+    rpc:call(Repl1, emqx_mgmt_cli, cluster, [["discovery", "enable"]]),
+    %% nodes will join and restart asynchronously; re-clustering may need more time
     ?assertEqual(
         ok,
         emqx_common_test_helpers:wait_for(