From adbc47d3bbe5fc5c358f4004e19d31d7dbfdb8bb Mon Sep 17 00:00:00 2001 From: Manan Gupta Date: Thu, 2 Jan 2025 17:51:10 +0530 Subject: [PATCH] feat: use the existing keyspace struct instead of creating a new one Signed-off-by: Manan Gupta --- go/cmd/vtctldclient/command/keyspaces.go | 8 +- go/test/endtoend/keyspace/keyspace_test.go | 8 +- .../reparent/emergencyreparent/ers_test.go | 28 +++---- .../reparent/newfeaturetest/reparent_test.go | 14 ++-- .../reparent/plannedreparent/reparent_test.go | 26 +++---- .../reparent/semisync/semi_sync_test.go | 4 +- go/test/endtoend/reparent/utils/utils.go | 4 +- .../transaction/benchmark/bench_test.go | 4 +- .../transaction/twopc/fuzz/main_test.go | 6 +- .../endtoend/transaction/twopc/main_test.go | 4 +- .../transaction/twopc/metric/main_test.go | 4 +- .../transaction/twopc/stress/main_test.go | 6 +- go/test/endtoend/transaction/tx_test.go | 4 +- .../primaryfailure/primary_failure_test.go | 10 +-- go/test/endtoend/vtorc/utils/utils.go | 4 +- go/vt/vtctl/grpcvtctldserver/server.go | 25 ++++--- go/vt/vtctl/grpcvtctldserver/server_test.go | 9 ++- go/vt/vtctl/reparentutil/durability_funcs.go | 17 +++-- .../reparentutil/durability_funcs_test.go | 61 ++++++++-------- .../reparentutil/emergency_reparenter.go | 13 ++-- .../reparentutil/emergency_reparenter_test.go | 73 ++++++++++--------- .../vtctl/reparentutil/planned_reparenter.go | 15 ++-- .../planned_reparenter_flaky_test.go | 32 ++++---- .../reparentutil/{ => policy}/durability.go | 2 +- .../{ => policy}/durability_test.go | 2 +- go/vt/vtctl/reparentutil/reparent_sorter.go | 11 +-- .../reparentutil/reparent_sorter_test.go | 3 +- go/vt/vtctl/reparentutil/replication.go | 7 +- go/vt/vtctl/reparentutil/replication_test.go | 33 +++++---- go/vt/vtctl/reparentutil/util.go | 5 +- go/vt/vtctl/reparentutil/util_test.go | 5 +- go/vt/vtctl/vtctl.go | 6 +- go/vt/vtctld/api_test.go | 4 +- go/vt/vtorc/inst/analysis_dao.go | 14 ++-- go/vt/vtorc/inst/analysis_dao_test.go | 70 +++++++++--------- go/vt/vtorc/inst/keyspace_dao.go | 6 +- go/vt/vtorc/inst/keyspace_dao_test.go | 10 +-- .../logic/keyspace_shard_discovery_test.go | 16 ++-- go/vt/vtorc/logic/topology_recovery.go | 7 +- go/vt/vttablet/tabletmanager/rpc_backup.go | 6 +- go/vt/vttablet/tabletmanager/tm_init.go | 6 +- go/vt/wrangler/reparent.go | 5 +- go/vt/wrangler/tablet.go | 5 +- .../testlib/emergency_reparent_shard_test.go | 5 +- .../testlib/planned_reparent_shard_test.go | 10 +-- go/vt/wrangler/testlib/reparent_utils_test.go | 5 +- 46 files changed, 322 insertions(+), 300 deletions(-) rename go/vt/vtctl/reparentutil/{ => policy}/durability.go (99%) rename go/vt/vtctl/reparentutil/{ => policy}/durability_test.go (99%) diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go index 5c5da619767..4d1156291cf 100644 --- a/go/cmd/vtctldclient/command/keyspaces.go +++ b/go/cmd/vtctldclient/command/keyspaces.go @@ -26,7 +26,7 @@ import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/protoutil" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/constants/sidecar" @@ -154,7 +154,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { var snapshotTime *vttime.Time if topodatapb.KeyspaceType(createKeyspaceOptions.KeyspaceType) == topodatapb.KeyspaceType_SNAPSHOT { - if createKeyspaceOptions.DurabilityPolicy != reparentutil.DurabilityNone { + if createKeyspaceOptions.DurabilityPolicy != 
policy.DurabilityNone { return errors.New("--durability-policy cannot be specified while creating a snapshot keyspace") } @@ -410,7 +410,7 @@ func init() { CreateKeyspace.Flags().Var(&createKeyspaceOptions.KeyspaceType, "type", "The type of the keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.") - CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", policy.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") Root.AddCommand(CreateKeyspace) @@ -426,7 +426,7 @@ func init() { RemoveKeyspaceCell.Flags().BoolVarP(&removeKeyspaceCellOptions.Recursive, "recursive", "r", false, "Also delete all tablets in that cell beloning to the specified keyspace.") Root.AddCommand(RemoveKeyspaceCell) - SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Other values include 'semi_sync' and others as dictated by registered plugins.") + SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", policy.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. 
Other values include 'semi_sync' and others as dictated by registered plugins.") Root.AddCommand(SetKeyspaceDurabilityPolicy) ValidateSchemaKeyspace.Flags().BoolVar(&validateSchemaKeyspaceOptions.IncludeViews, "include-views", false, "Includes views in compared schemas.") diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index ee7e072a010..c899ee7cdec 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -29,7 +29,7 @@ import ( "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -143,18 +143,18 @@ func TestDurabilityPolicyField(t *testing.T) { out, err := vtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "ks_durability", "--durability-policy=semi_sync") require.NoError(t, err, out) - checkDurabilityPolicy(t, reparentutil.DurabilitySemiSync) + checkDurabilityPolicy(t, policy.DurabilitySemiSync) out, err = vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", "ks_durability", "--durability-policy=none") require.NoError(t, err, out) - checkDurabilityPolicy(t, reparentutil.DurabilityNone) + checkDurabilityPolicy(t, policy.DurabilityNone) out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability") require.NoError(t, err, out) - checkDurabilityPolicy(t, reparentutil.DurabilitySemiSync) + checkDurabilityPolicy(t, policy.DurabilitySemiSync) out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index 8eabf6013dd..37855a47df6 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -28,11 +28,11 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) func TestTrivialERS(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -56,7 +56,7 @@ func TestTrivialERS(t *testing.T) { } func TestReparentIgnoreReplicas(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -97,7 +97,7 @@ func TestReparentIgnoreReplicas(t *testing.T) { } func TestReparentDownPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := 
clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -132,7 +132,7 @@ func TestReparentDownPrimary(t *testing.T) { } func TestReparentNoChoiceDownPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -167,7 +167,7 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) { func TestSemiSyncSetupCorrectly(t *testing.T) { t.Run("semi-sync enabled", func(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -194,7 +194,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { }) t.Run("semi-sync disabled", func(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -223,7 +223,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { // TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary func TestERSPromoteRdonly(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -250,7 +250,7 @@ func TestERSPromoteRdonly(t *testing.T) { // TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set func TestERSPreventCrossCellPromotion(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -272,7 +272,7 @@ func TestERSPreventCrossCellPromotion(t *testing.T) { // TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our promoted primary should have // caught up to it by pulling transactions from it func TestPullFromRdonly(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -343,7 +343,7 @@ func TestPullFromRdonly(t *testing.T) { // replicas which do not have any replication status and also succeeds if the io thread // is stopped on the primary elect. 
func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -442,7 +442,7 @@ func TestERSForInitialization(t *testing.T) { } func TestRecoverWithMultipleFailures(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -469,7 +469,7 @@ func TestRecoverWithMultipleFailures(t *testing.T) { // TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting // a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs func TestERSFailFast(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -508,7 +508,7 @@ func TestERSFailFast(t *testing.T) { // TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped. // If there are more than 1, we also fail. func TestReplicationStopped(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index 1b5af658cb3..fc5db965847 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -28,7 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) // TestRecoverWithMultipleVttabletFailures tests that ERS succeeds with the default values @@ -37,7 +37,7 @@ import ( // The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the // default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag. 
func TestRecoverWithMultipleVttabletFailures(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -68,7 +68,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { // and ERS succeeds. func TestSingleReplicaERS(t *testing.T) { // Set up a cluster with none durability policy - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets // Confirm that the replication is setup correctly in the beginning. @@ -103,7 +103,7 @@ func TestSingleReplicaERS(t *testing.T) { // TestTabletRestart tests that a running tablet can be restarted and everything is still fine func TestTabletRestart(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -115,7 +115,7 @@ func TestTabletRestart(t *testing.T) { // Tests ensures that ChangeTabletType works even when semi-sync plugins are not loaded. func TestChangeTypeWithoutSemiSync(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -160,7 +160,7 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) { // TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a // write that happens when PromoteReplica is called. func TestERSWithWriteInPromoteReplica(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -177,7 +177,7 @@ func TestERSWithWriteInPromoteReplica(t *testing.T) { } func TestBufferingWithMultipleDisruptions(t *testing.T) { - clusterInstance := utils.SetupShardedReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupShardedReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) // Stop all VTOrc instances, so that they don't interfere with the test. 
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index 0d2c211395b..7b750dc3f16 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -33,11 +33,11 @@ import ( "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -48,7 +48,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { } func TestReparentCrossCell(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -61,7 +61,7 @@ func TestReparentCrossCell(t *testing.T) { } func TestReparentGraceful(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -83,7 +83,7 @@ func TestReparentGraceful(t *testing.T) { // TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet func TestPRSWithDrainedLaggingTablet(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -109,7 +109,7 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { } func TestReparentReplicaOffline(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -126,7 +126,7 @@ func TestReparentReplicaOffline(t *testing.T) { } func TestReparentAvoid(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.DeleteTablet(t, clusterInstance, tablets[2]) @@ -173,13 +173,13 @@ func TestReparentAvoid(t *testing.T) { } func TestReparentFromOutside(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) reparentFromOutside(t, clusterInstance, false) } func TestReparentFromOutsideWithNoPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer 
utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -278,7 +278,7 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus } func TestReparentWithDownReplica(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -324,7 +324,7 @@ func TestReparentWithDownReplica(t *testing.T) { } func TestChangeTypeSemiSync(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -390,7 +390,7 @@ func TestChangeTypeSemiSync(t *testing.T) { // 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected // 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention func TestCrossCellDurability(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityCrossCell) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityCrossCell) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -429,7 +429,7 @@ func TestCrossCellDurability(t *testing.T) { // TestFullStatus tests that the RPC FullStatus works as intended. func TestFullStatus(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) diff --git a/go/test/endtoend/reparent/semisync/semi_sync_test.go b/go/test/endtoend/reparent/semisync/semi_sync_test.go index c04e39463bc..804a1645f19 100644 --- a/go/test/endtoend/reparent/semisync/semi_sync_test.go +++ b/go/test/endtoend/reparent/semisync/semi_sync_test.go @@ -25,7 +25,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) func TestSemiSyncUpgradeDowngrade(t *testing.T) { @@ -34,7 +34,7 @@ func TestSemiSyncUpgradeDowngrade(t *testing.T) { if ver != 21 { t.Skip("We only want to run this test for v21 release") } - clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync) + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 8a120d1c971..5b481de235f 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -32,7 +32,7 @@ import ( "github.com/stretchr/testify/require" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vttablet/tabletconn" 
"vitess.io/vitess/go/mysql" @@ -72,7 +72,7 @@ func SetupReparentCluster(t *testing.T, durability string) *cluster.LocalProcess // SetupRangeBasedCluster sets up the range based cluster func SetupRangeBasedCluster(ctx context.Context, t *testing.T) *cluster.LocalProcessCluster { - return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, reparentutil.DurabilitySemiSync) + return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, policy.DurabilitySemiSync) } // SetupShardedReparentCluster is used to setup a sharded cluster for testing diff --git a/go/test/endtoend/transaction/benchmark/bench_test.go b/go/test/endtoend/transaction/benchmark/bench_test.go index 891ffa3d7b3..553919f893e 100644 --- a/go/test/endtoend/transaction/benchmark/bench_test.go +++ b/go/test/endtoend/transaction/benchmark/bench_test.go @@ -30,7 +30,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -66,7 +66,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 1, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/fuzz/main_test.go b/go/test/endtoend/transaction/twopc/fuzz/main_test.go index 72f238056ab..3516bdefe05 100644 --- a/go/test/endtoend/transaction/twopc/fuzz/main_test.go +++ b/go/test/endtoend/transaction/twopc/fuzz/main_test.go @@ -29,7 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -79,7 +79,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 @@ -90,7 +90,7 @@ func TestMain(m *testing.M) { Name: unshardedKeyspaceName, SchemaSQL: "", VSchema: "{}", - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go index 7c82fcccceb..7a2f7e8676e 100644 --- a/go/test/endtoend/transaction/twopc/main_test.go +++ b/go/test/endtoend/transaction/twopc/main_test.go @@ -32,7 +32,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -93,7 +93,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 diff --git 
a/go/test/endtoend/transaction/twopc/metric/main_test.go b/go/test/endtoend/transaction/twopc/metric/main_test.go index c38a4002c08..0018f5d45d8 100644 --- a/go/test/endtoend/transaction/twopc/metric/main_test.go +++ b/go/test/endtoend/transaction/twopc/metric/main_test.go @@ -29,7 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -79,7 +79,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/stress/main_test.go b/go/test/endtoend/transaction/twopc/stress/main_test.go index 9c831cecf0d..977fa3f6fd6 100644 --- a/go/test/endtoend/transaction/twopc/stress/main_test.go +++ b/go/test/endtoend/transaction/twopc/stress/main_test.go @@ -29,7 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -81,7 +81,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 @@ -92,7 +92,7 @@ func TestMain(m *testing.M) { Name: unshardedKeyspaceName, SchemaSQL: "", VSchema: "{}", - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/tx_test.go b/go/test/endtoend/transaction/tx_test.go index a30fcdb062d..fd162cb3d41 100644 --- a/go/test/endtoend/transaction/tx_test.go +++ b/go/test/endtoend/transaction/tx_test.go @@ -29,7 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -70,7 +70,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1, err diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 5842c0434de..9c2d51c8cf7 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -31,7 +31,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/logic" ) @@ -45,7 +45,7 @@ func TestDownPrimary(t *testing.T) { // If we don't specify a small value of 
--wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, - }, 1, reparentutil.DurabilitySemiSync) + }, 1, policy.DurabilitySemiSync) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo @@ -116,7 +116,7 @@ func TestDownPrimary(t *testing.T) { // bring down primary before VTOrc has started, let vtorc repair. func TestDownPrimaryBeforeVTOrc(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, reparentutil.DurabilityNone) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, policy.DurabilityNone) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] curPrimary := shard0.Vttablets[0] @@ -171,7 +171,7 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { // delete the primary record and let vtorc repair. func TestDeletedPrimaryTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, reparentutil.DurabilityNone) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, policy.DurabilityNone) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo @@ -242,7 +242,7 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, - }, 1, reparentutil.DurabilitySemiSync) + }, 1, policy.DurabilitySemiSync) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 573927cdcb3..0a2d516fe63 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -40,7 +40,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" // Register topo implementations. 
_ "vitess.io/vitess/go/vt/topo/consultopo" @@ -300,7 +300,7 @@ func SetupVttabletsAndVTOrcs(t *testing.T, clusterInfo *VTOrcClusterInfo, numRep } if durability == "" { - durability = reparentutil.DurabilityNone + durability = policy.DurabilityNone } out, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, fmt.Sprintf("--durability-policy=%s", durability)) require.NoError(t, err, out) diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index c3dc22d21b4..706e0bec92a 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -71,6 +71,7 @@ import ( "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/topotools/events" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vtenv" @@ -668,7 +669,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } @@ -698,7 +699,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch // Since we want to check the durability rules for the desired state and not before we make that change expectedTablet := tablet.Tablet.CloneVT() expectedTablet.Type = req.DbType - err = s.tmc.ChangeType(ctx, tablet.Tablet, req.DbType, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, expectedTablet)) + err = s.tmc.ChangeType(ctx, tablet.Tablet, req.DbType, policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, expectedTablet)) if err != nil { return nil, err } @@ -2776,7 +2777,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( return err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err } @@ -2860,7 +2861,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( // position logger.Infof("initializing primary on %v", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) event.DispatchUpdate(ev, "initializing primary") - rp, err := tmc.InitPrimary(ctx, primaryElectTabletInfo.Tablet, reparentutil.SemiSyncAckers(durability, primaryElectTabletInfo.Tablet) > 0) + rp, err := tmc.InitPrimary(ctx, primaryElectTabletInfo.Tablet, policy.SemiSyncAckers(durability, primaryElectTabletInfo.Tablet) > 0) if err != nil { return err } @@ -2901,7 +2902,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( go func(alias string, tabletInfo *topo.TabletInfo) { defer wgReplicas.Done() logger.Infof("initializing replica %v", alias) - if err := tmc.InitReplica(replCtx, tabletInfo.Tablet, req.PrimaryElectTabletAlias, rp, now, reparentutil.IsReplicaSemiSync(durability, primaryElectTabletInfo.Tablet, tabletInfo.Tablet)); err != nil { + if err := tmc.InitReplica(replCtx, tabletInfo.Tablet, req.PrimaryElectTabletAlias, rp, now, policy.IsReplicaSemiSync(durability, primaryElectTabletInfo.Tablet, tabletInfo.Tablet)); err != nil { rec.RecordError(fmt.Errorf("tablet %v InitReplica failed: %v", alias, err)) } }(alias, tabletInfo) @@ -3598,12 +3599,12 @@ func (s *VtctldServer) ReparentTablet(ctx 
context.Context, req *vtctldatapb.Repa return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } - if err = s.tmc.SetReplicationSource(ctx, tablet.Tablet, shard.PrimaryAlias, 0, "", false, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet), 0); err != nil { + if err = s.tmc.SetReplicationSource(ctx, tablet.Tablet, shard.PrimaryAlias, 0, "", false, policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet), 0); err != nil { return nil, err } @@ -3787,7 +3788,7 @@ func (s *VtctldServer) SetKeyspaceDurabilityPolicy(ctx context.Context, req *vtc return nil, err } - policyValid := reparentutil.CheckDurabilityPolicyExists(req.DurabilityPolicy) + policyValid := policy.CheckDurabilityPolicyExists(req.DurabilityPolicy) if !policyValid { err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "durability policy <%v> is not a valid policy. Please register it as a policy first", req.DurabilityPolicy) return nil, err @@ -4308,12 +4309,12 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } - if err = s.tmc.StartReplication(ctx, tablet.Tablet, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet)); err != nil { + if err = s.tmc.StartReplication(ctx, tablet.Tablet, policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet)); err != nil { log.Errorf("StartReplication: failed to start replication on %v: %v", alias, err) return nil, err } @@ -4413,12 +4414,12 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } - if err = s.tmc.ChangeType(ctx, tablet.Tablet, topodatapb.TabletType_PRIMARY, reparentutil.SemiSyncAckers(durability, tablet.Tablet) > 0); err != nil { + if err = s.tmc.ChangeType(ctx, tablet.Tablet, topodatapb.TabletType_PRIMARY, policy.SemiSyncAckers(durability, tablet.Tablet) > 0); err != nil { log.Warningf("ChangeType(%v, PRIMARY): %v", topoproto.TabletAliasString(req.Tablet), err) return nil, err } diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index bfea278e245..34b761ba2c6 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -30,6 +30,7 @@ import ( _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2777,14 +2778,14 @@ func TestCreateKeyspace(t *testing.T) { req: &vtctldatapb.CreateKeyspaceRequest{ Name: "testkeyspace", Type: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, }, expected: &vtctldatapb.CreateKeyspaceResponse{ Keyspace: &vtctldatapb.Keyspace{ Name: "testkeyspace", Keyspace: &topodatapb.Keyspace{ 
KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, }, }, }, @@ -11338,11 +11339,11 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { }, req: &vtctldatapb.SetKeyspaceDurabilityPolicyRequest{ Keyspace: "ks1", - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, }, expected: &vtctldatapb.SetKeyspaceDurabilityPolicyResponse{ Keyspace: &topodatapb.Keyspace{ - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, }, }, }, diff --git a/go/vt/vtctl/reparentutil/durability_funcs.go b/go/vt/vtctl/reparentutil/durability_funcs.go index 63e123a685d..da53c1a3e15 100644 --- a/go/vt/vtctl/reparentutil/durability_funcs.go +++ b/go/vt/vtctl/reparentutil/durability_funcs.go @@ -19,16 +19,17 @@ package reparentutil import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) // SemiSyncAckersForPrimary returns the list of tablets which are capable of sending Semi-Sync Acks for the given primary tablet -func SemiSyncAckersForPrimary(durability Durabler, primary *topodatapb.Tablet, allTablets []*topodatapb.Tablet) (semiSyncAckers []*topodatapb.Tablet) { +func SemiSyncAckersForPrimary(durability policy.Durabler, primary *topodatapb.Tablet, allTablets []*topodatapb.Tablet) (semiSyncAckers []*topodatapb.Tablet) { for _, tablet := range allTablets { if topoproto.TabletAliasEqual(primary.Alias, tablet.Alias) { continue } - if IsReplicaSemiSync(durability, primary, tablet) { + if policy.IsReplicaSemiSync(durability, primary, tablet) { semiSyncAckers = append(semiSyncAckers, tablet) } } @@ -37,7 +38,7 @@ func SemiSyncAckersForPrimary(durability Durabler, primary *topodatapb.Tablet, a // haveRevokedForTablet checks whether we have reached enough tablets such that the given primary eligible tablet cannot accept any new writes // The tablets reached should have their replication stopped and must be set to read only. 
-func haveRevokedForTablet(durability Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { +func haveRevokedForTablet(durability policy.Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { // if we have reached the primaryEligible tablet and stopped its replication and marked it read only, then it will not // accept any new writes if topoproto.IsTabletInList(primaryEligible, tabletsReached) { @@ -51,7 +52,7 @@ func haveRevokedForTablet(durability Durabler, primaryEligible *topodatapb.Table allSemiSyncAckers := SemiSyncAckersForPrimary(durability, primaryEligible, allTablets) // numOfSemiSyncAcksRequired is the number of semi sync Acks that the primaryEligible tablet requires - numOfSemiSyncAcksRequired := SemiSyncAckers(durability, primaryEligible) + numOfSemiSyncAcksRequired := policy.SemiSyncAckers(durability, primaryEligible) // if we have reached enough semi-sync Acking tablets such that the primaryEligible cannot accept a write // we have revoked from the tablet @@ -61,9 +62,9 @@ func haveRevokedForTablet(durability Durabler, primaryEligible *topodatapb.Table // haveRevoked checks whether we have reached enough tablets to guarantee that no tablet eligible to become a primary can accept any write // All the tablets reached must have their replication stopped and set to read only for us to guarantee that we have revoked access // from all the primary eligible tablets (prevent them from accepting any new writes) -func haveRevoked(durability Durabler, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { +func haveRevoked(durability policy.Durabler, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { for _, tablet := range allTablets { - if PromotionRule(durability, tablet) == promotionrule.MustNot { + if policy.PromotionRule(durability, tablet) == promotionrule.MustNot { continue } if !haveRevokedForTablet(durability, tablet, tabletsReached, allTablets) { @@ -74,7 +75,7 @@ func haveRevoked(durability Durabler, tabletsReached []*topodatapb.Tablet, allTa } // canEstablishForTablet checks whether we have reached enough tablets to say that the given primary eligible tablet will be able to accept new writes -func canEstablishForTablet(durability Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet) bool { +func canEstablishForTablet(durability policy.Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet) bool { // if we have not reached the primaryEligible tablet, then it cannot be considered eligible to accept writes // since it might have been stopped if !topoproto.IsTabletInList(primaryEligible, tabletsReached) { @@ -85,7 +86,7 @@ func canEstablishForTablet(durability Durabler, primaryEligible *topodatapb.Tabl semiSyncAckersReached := SemiSyncAckersForPrimary(durability, primaryEligible, tabletsReached) // numOfSemiSyncAcksRequired is the number of semi sync Acks that the primaryEligible tablet requires - numOfSemiSyncAcksRequired := SemiSyncAckers(durability, primaryEligible) + numOfSemiSyncAcksRequired := policy.SemiSyncAckers(durability, primaryEligible) // if we have reached enough semi-sync Acking tablets such that the primaryEligible can accept a write // we can safely promote this tablet diff --git a/go/vt/vtctl/reparentutil/durability_funcs_test.go b/go/vt/vtctl/reparentutil/durability_funcs_test.go index 546ae47df20..737d3e40346 100644 --- 
a/go/vt/vtctl/reparentutil/durability_funcs_test.go +++ b/go/vt/vtctl/reparentutil/durability_funcs_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -73,25 +74,25 @@ func TestSemiSyncAckersForPrimary(t *testing.T) { }{ { name: "no other tablets", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet}, wantSemiSyncAckers: nil, }, { name: "'none' durability policy", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: nil, }, { name: "'semi_sync' durability policy", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet}, }, { name: "'cross_cell' durability policy", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: []*topodatapb.Tablet{replicaCrossCellTablet}, @@ -99,7 +100,7 @@ func TestSemiSyncAckersForPrimary(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err, "error setting durability policy") semiSyncAckers := SemiSyncAckersForPrimary(durability, tt.primary, tt.allTablets) require.Equal(t, tt.wantSemiSyncAckers, semiSyncAckers) @@ -118,7 +119,7 @@ func Test_haveRevokedForTablet(t *testing.T) { }{ { name: "'none' durability policy - not revoked", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -129,7 +130,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'none' durability policy - revoked", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -140,7 +141,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - revoked", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -151,7 +152,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - not revoked", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -162,7 +163,7 @@ func 
Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - revoked", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -173,7 +174,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - not revoked", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -184,7 +185,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - primary in list", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -197,7 +198,7 @@ func Test_haveRevokedForTablet(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err) out := haveRevokedForTablet(durability, tt.primaryEligible, tt.tabletsReached, tt.allTablets) require.Equal(t, tt.revoked, out) @@ -215,7 +216,7 @@ func Test_haveRevoked(t *testing.T) { }{ { name: "'none' durability policy - all tablets revoked", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -225,7 +226,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - all tablets revoked", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -235,7 +236,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - all tablets revoked", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -245,7 +246,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'none' durability policy - revoked", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, }, @@ -255,7 +256,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - revoked", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ replicaTablet, replicaCrossCellTablet, rdonlyTablet, }, @@ -265,7 +266,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - revoked", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ replicaCrossCellTablet, }, @@ -275,7 +276,7 @@ func Test_haveRevoked(t *testing.T) { revoked: 
true, }, { name: "'none' durability policy - not revoked", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -285,7 +286,7 @@ func Test_haveRevoked(t *testing.T) { revoked: false, }, { name: "'semi_sync' durability policy - not revoked", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -295,7 +296,7 @@ func Test_haveRevoked(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - not revoked", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -307,7 +308,7 @@ func Test_haveRevoked(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err) out := haveRevoked(durability, tt.tabletsReached, tt.allTablets) require.Equal(t, tt.revoked, out) @@ -325,7 +326,7 @@ func Test_canEstablishForTablet(t *testing.T) { }{ { name: "primary not reached", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -333,7 +334,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "not established", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -341,7 +342,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "not established", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -349,7 +350,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "established", - durabilityPolicy: DurabilityNone, + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, @@ -357,7 +358,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: true, }, { name: "established", - durabilityPolicy: DurabilitySemiSync, + durabilityPolicy: policy.DurabilitySemiSync, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, @@ -365,7 +366,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: true, }, { name: "established", - durabilityPolicy: DurabilityCrossCell, + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaCrossCellTablet, @@ -375,7 +376,7 @@ func Test_canEstablishForTablet(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprintf("'%s' durability policy - %s", tt.durabilityPolicy, tt.name), func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err) require.Equalf(t, tt.canEstablish, 
canEstablishForTablet(durability, tt.primaryEligible, tt.tabletsReached), "canEstablishForTablet(%v, %v)", tt.primaryEligible, tt.tabletsReached) }) diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 70faf8958c7..5f7d3140c7b 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/event" "vitess.io/vitess/go/sets" @@ -66,7 +67,7 @@ type EmergencyReparentOptions struct { // Private options managed internally. We use value passing to avoid leaking // these details back out. lockAction string - durability Durabler + durability policy.Durabler } // counters for Emergency Reparent Shard @@ -181,7 +182,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve } erp.logger.Infof("Getting a new durability policy for %v", keyspaceDurability) - opts.durability, err = GetDurabilityPolicy(keyspaceDurability) + opts.durability, err = policy.GetDurabilityPolicy(keyspaceDurability) if err != nil { return err } @@ -539,11 +540,11 @@ func (erp *EmergencyReparenter) reparentReplicas( if ev.ShardInfo.PrimaryAlias == nil { erp.logger.Infof("setting up %v as new primary for an uninitialized cluster", alias) // we call InitPrimary when the PrimaryAlias in the ShardInfo is empty. This happens when we have an uninitialized cluster. - position, err = erp.tmc.InitPrimary(primaryCtx, tablet, SemiSyncAckers(opts.durability, tablet) > 0) + position, err = erp.tmc.InitPrimary(primaryCtx, tablet, policy.SemiSyncAckers(opts.durability, tablet) > 0) } else { erp.logger.Infof("starting promotion for the new primary - %v", alias) // we call PromoteReplica which changes the tablet type, fixes the semi-sync, set the primary to read-write and flushes the binlogs - position, err = erp.tmc.PromoteReplica(primaryCtx, tablet, SemiSyncAckers(opts.durability, tablet) > 0) + position, err = erp.tmc.PromoteReplica(primaryCtx, tablet, policy.SemiSyncAckers(opts.durability, tablet) > 0) } if err != nil { return vterrors.Wrapf(err, "primary-elect tablet %v failed to be upgraded to primary: %v", alias, err) @@ -574,7 +575,7 @@ func (erp *EmergencyReparenter) reparentReplicas( forceStart = fs } - err := erp.tmc.SetReplicationSource(replCtx, ti.Tablet, newPrimaryTablet.Alias, 0, "", forceStart, IsReplicaSemiSync(opts.durability, newPrimaryTablet, ti.Tablet), 0) + err := erp.tmc.SetReplicationSource(replCtx, ti.Tablet, newPrimaryTablet.Alias, 0, "", forceStart, policy.IsReplicaSemiSync(opts.durability, newPrimaryTablet, ti.Tablet), 0) if err != nil { err = vterrors.Wrapf(err, "tablet %v SetReplicationSource failed: %v", alias, err) rec.RecordError(err) @@ -746,7 +747,7 @@ func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb for _, tablet := range validTablets { tabletAliasStr := topoproto.TabletAliasString(tablet.Alias) // Remove tablets which have MustNot promote rule since they must never be promoted - if PromotionRule(opts.durability, tablet) == promotionrule.MustNot { + if policy.PromotionRule(opts.durability, tablet) == promotionrule.MustNot { erp.logger.Infof("Removing %s from list of valid candidates for promotion because it has the Must Not promote rule", tabletAliasStr) if opts.NewPrimaryAlias != nil && topoproto.TabletAliasEqual(opts.NewPrimaryAlias, tablet.Alias) { return nil, 
vterrors.Errorf(vtrpc.Code_ABORTED, "proposed primary %s has a must not promotion rule", topoproto.TabletAliasString(opts.NewPrimaryAlias)) diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index 0129397b415..840df41d6e2 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql/replication" logutilpb "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/mysql" @@ -129,7 +130,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ { name: "success", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -238,7 +239,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "success - 1 replica and 1 rdonly failure", - durability: DurabilitySemiSync, + durability: policy.DurabilitySemiSync, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -372,7 +373,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { // Here, all our tablets are tied, so we're going to explicitly pick // zone1-101. name: "success with requested primary-elect", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 101, @@ -483,7 +484,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "success with existing primary", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { @@ -594,7 +595,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard not found", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{}, unlockTopo: true, // we shouldn't try to lock the nonexistent shard @@ -607,7 +608,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot stop replication", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -666,7 +667,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "lost topo lock", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -725,7 +726,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot get reparent candidates", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -799,7 +800,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "zero valid reparent candidates", - durability: DurabilityNone, + durability: 
policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{}, shards: []*vtctldatapb.Shard{ @@ -816,7 +817,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "error waiting for relay logs to apply", - durability: DurabilityNone, + durability: policy.DurabilityNone, // one replica is going to take a minute to apply relay logs emergencyReparentOps: EmergencyReparentOptions{ WaitReplicasTimeout: time.Millisecond * 50, @@ -911,7 +912,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "requested primary-elect is not in tablet map", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 200, @@ -1001,7 +1002,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "requested primary-elect is not winning primary-elect", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ // we're requesting a tablet that's behind in replication Cell: "zone1", Uid: 102, @@ -1124,7 +1125,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot promote new primary", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 102, @@ -1237,7 +1238,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "promotion-rule - no valid candidates for emergency reparent", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -1344,7 +1345,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary - must not promotion rule", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -1456,7 +1457,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cross cell - no valid candidates", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{PreventCrossCellPromotion: true}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -1575,7 +1576,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary in a different cell", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ PreventCrossCellPromotion: true, NewPrimaryAlias: &topodatapb.TabletAlias{ @@ -1700,7 +1701,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary cannot make progress", - durability: DurabilityCrossCell, + durability: policy.DurabilityCrossCell, emergencyReparentOps: EmergencyReparentOptions{ NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -1815,7 +1816,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "expected primary mismatch", - durability: DurabilityNone, + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ ExpectedPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -2333,7 +2334,7 @@ func 
TestEmergencyReparenter_promotionOfNewPrimary(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy(DurabilityNone) + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3021,7 +3022,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy(DurabilityNone) + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -3502,7 +3503,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy(DurabilityNone) + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if tt.remoteOpTimeout != 0 { @@ -4092,7 +4093,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy(DurabilityNone) + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -4336,7 +4337,7 @@ func TestEmergencyReparenter_identifyPrimaryCandidate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - durability, _ := GetDurabilityPolicy(DurabilityNone) + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) test.emergencyReparentOps.durability = durability logger := logutil.NewMemoryLogger() @@ -4355,7 +4356,7 @@ func TestEmergencyReparenter_identifyPrimaryCandidate(t *testing.T) { // TestParentContextCancelled tests that even if the parent context of reparentReplicas cancels, we should not cancel the context of // SetReplicationSource since there could be tablets that are running it even after ERS completes. 
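The test hunks above replace every bare GetDurabilityPolicy(DurabilityNone) call with the package-qualified policy.GetDurabilityPolicy(policy.DurabilityNone). A minimal, self-contained sketch (not part of this patch) of how a caller resolves a Durabler through the relocated package; it assumes, as the error handling in reparentShardLocked suggests, that an unregistered policy name returns an error:

package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
)

func main() {
	// The three built-in policy names exercised throughout these tests.
	for _, name := range []string{policy.DurabilityNone, policy.DurabilitySemiSync, policy.DurabilityCrossCell} {
		durability, err := policy.GetDurabilityPolicy(name)
		fmt.Printf("%-12s -> %T (err=%v)\n", name, durability, err)
	}

	// Both reparenters return early when a keyspace names a policy that is
	// not registered, so the error from the lookup must always be checked.
	if _, err := policy.GetDurabilityPolicy("not-a-registered-policy"); err != nil {
		fmt.Println("lookup failed as expected:", err)
	}
}

The policy name constants are plain strings, which is also why they can serve directly as defaults for the --durability-policy flags updated later in this patch.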
func TestParentContextCancelled(t *testing.T) { - durability, err := GetDurabilityPolicy(DurabilityNone) + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) // Setup ERS options with a very high wait replicas timeout emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability} @@ -4486,28 +4487,28 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { }{ { name: "filter must not", - durability: DurabilityNone, + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, replicaCrossCellTablet}, }, { name: "host taking backup must not be on the list when there are other candidates", - durability: DurabilityNone, + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: replicaTakingBackup, filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet}, }, { name: "host taking backup must be the only one on the list when there are no other candidates", - durability: DurabilityNone, + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: []*topodatapb.Tablet{replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: replicaTakingBackup, filteredTablets: []*topodatapb.Tablet{replicaTablet}, }, { name: "filter cross cell", - durability: DurabilityNone, + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4523,14 +4524,14 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet}, }, { name: "filter establish", - durability: DurabilityCrossCell, + durability: policy.DurabilityCrossCell, validTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet}, tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: noTabletsTakingBackup, filteredTablets: nil, }, { name: "filter mixed", - durability: DurabilityCrossCell, + durability: policy.DurabilityCrossCell, prevPrimary: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone-2", @@ -4545,7 +4546,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet}, }, { name: "error - requested primary must not", - durability: DurabilityNone, + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4555,7 +4556,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { errShouldContain: "proposed primary zone-1-0000000003 has a must not promotion rule", }, { name: "error - requested primary not in same cell", - durability: DurabilityNone, + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4567,7 +4568,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { errShouldContain: "proposed primary zone-2-0000000002 is is a different cell as the previous primary", }, { name: "error - requested primary cannot establish", - durability: DurabilityCrossCell, + durability: 
policy.DurabilityCrossCell, validTablets: allTablets, tabletsTakingBackup: noTabletsTakingBackup, tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, @@ -4579,7 +4580,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durability) + durability, err := policy.GetDurabilityPolicy(tt.durability) require.NoError(t, err) tt.opts.durability = durability logger := logutil.NewMemoryLogger() @@ -5525,7 +5526,7 @@ func TestEmergencyReparenterFindErrantGTIDs(t *testing.T) { slices.Sort(keys) require.ElementsMatch(t, tt.wantedCandidates, keys) - dp, err := GetDurabilityPolicy(DurabilitySemiSync) + dp, err := policy.GetDurabilityPolicy(policy.DurabilitySemiSync) require.NoError(t, err) ers := EmergencyReparenter{logger: logutil.NewCallbackLogger(func(*logutilpb.Event) {})} winningPrimary, _, err := ers.findMostAdvanced(candidates, tt.tabletMap, EmergencyReparentOptions{durability: dp}) diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index 91669e33b5f..dcd6dc7c590 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -36,6 +36,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" ) @@ -71,7 +72,7 @@ type PlannedReparentOptions struct { // back out to the caller. lockAction string - durability Durabler + durability policy.Durabler } // NewPlannedReparenter returns a new PlannedReparenter object, ready to perform @@ -256,7 +257,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( setSourceCtx, setSourceCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) defer setSourceCancel() - if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, IsReplicaSemiSync(opts.durability, currentPrimary.Tablet, primaryElect), 0); err != nil { + if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, policy.IsReplicaSemiSync(opts.durability, currentPrimary.Tablet, primaryElect), 0); err != nil { return vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr) } @@ -304,7 +305,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( undoCtx, undoCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer undoCancel() - if undoErr := pr.tmc.UndoDemotePrimary(undoCtx, currentPrimary.Tablet, SemiSyncAckers(opts.durability, currentPrimary.Tablet) > 0); undoErr != nil { + if undoErr := pr.tmc.UndoDemotePrimary(undoCtx, currentPrimary.Tablet, policy.SemiSyncAckers(opts.durability, currentPrimary.Tablet) > 0); undoErr != nil { pr.logger.Warningf("encountered error while performing UndoDemotePrimary(%v): %v", currentPrimary.AliasString(), undoErr) finalWaitErr = vterrors.Wrapf(finalWaitErr, "encountered error while performing UndoDemotePrimary(%v): %v", currentPrimary.AliasString(), undoErr) } @@ -332,7 +333,7 @@ func (pr *PlannedReparenter) performInitialPromotion( // This is done to guarantee safety, in the sense that the semi-sync is on before we start accepting writes. 
// However, during initialization, it is likely that the database would not be created in the MySQL instance. // Therefore, we have to first set read-write mode, create the database and then fix semi-sync, otherwise we get blocked. - rp, err := pr.tmc.InitPrimary(promoteCtx, primaryElect, SemiSyncAckers(opts.durability, primaryElect) > 0) + rp, err := pr.tmc.InitPrimary(promoteCtx, primaryElect, policy.SemiSyncAckers(opts.durability, primaryElect) > 0) if err != nil { return "", vterrors.Wrapf(err, "primary-elect tablet %v failed to be promoted to primary; please try again", primaryElectAliasStr) } @@ -521,7 +522,7 @@ func (pr *PlannedReparenter) reparentShardLocked( } pr.logger.Infof("Getting a new durability policy for %v", keyspaceDurability) - opts.durability, err = GetDurabilityPolicy(keyspaceDurability) + opts.durability, err = policy.GetDurabilityPolicy(keyspaceDurability) if err != nil { return err } @@ -693,7 +694,7 @@ func (pr *PlannedReparenter) reparentTablets( // that it needs to start replication after transitioning from // PRIMARY => REPLICA. forceStartReplication := false - if err := pr.tmc.SetReplicationSource(replCtx, tablet, ev.NewPrimary.Alias, reparentJournalTimestamp, "", forceStartReplication, IsReplicaSemiSync(opts.durability, ev.NewPrimary, tablet), 0); err != nil { + if err := pr.tmc.SetReplicationSource(replCtx, tablet, ev.NewPrimary.Alias, reparentJournalTimestamp, "", forceStartReplication, policy.IsReplicaSemiSync(opts.durability, ev.NewPrimary, tablet), 0); err != nil { rec.RecordError(vterrors.Wrapf(err, "tablet %v failed to SetReplicationSource(%v): %v", alias, primaryElectAliasStr, err)) } }(alias, tabletInfo.Tablet) @@ -702,7 +703,7 @@ func (pr *PlannedReparenter) reparentTablets( // If `PromoteReplica` call is required, we should call it and use the position that it returns. if promoteReplicaRequired { // Promote the candidate primary to type:PRIMARY. 
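Because the concrete policy implementations (durabilityNone, durabilityCrossCell, and so on) are now unexported inside the policy package, the planned-reparenter tests in this patch switch from struct literals to the small getDurabilityPolicy helper. A hypothetical standalone test, assuming only the helper and constants shown in these hunks, illustrating the lookup-by-name pattern:

package reparentutil_example_test // illustrative package name, not from the patch

import (
	"testing"

	"github.com/stretchr/testify/require"

	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
)

// getDurabilityPolicy mirrors the helper added in planned_reparenter_flaky_test.go:
// tests obtain a Durabler by name instead of instantiating unexported structs.
func getDurabilityPolicy(policyName string) policy.Durabler {
	p, _ := policy.GetDurabilityPolicy(policyName)
	return p
}

func TestLookupBuiltinPolicies(t *testing.T) {
	for _, name := range []string{policy.DurabilityNone, policy.DurabilitySemiSync, policy.DurabilityCrossCell} {
		require.NotNil(t, getDurabilityPolicy(name), "expected a Durabler for %q", name)
	}
}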
- primaryPosition, err := pr.tmc.PromoteReplica(replCtx, ev.NewPrimary, SemiSyncAckers(opts.durability, ev.NewPrimary) > 0) + primaryPosition, err := pr.tmc.PromoteReplica(replCtx, ev.NewPrimary, policy.SemiSyncAckers(opts.durability, ev.NewPrimary) > 0) if err != nil { pr.logger.Warningf("primary %v failed to PromoteReplica; cancelling replica reparent attempts", primaryElectAliasStr) replCancel() diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go index b0432d666c4..148d9fd812c 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go @@ -25,6 +25,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/test/utils" @@ -557,6 +558,11 @@ func TestPlannedReparenter_getLockAction(t *testing.T) { } } +func getDurabilityPolicy(policyName string) policy.Durabler { + p, _ := policy.GetDurabilityPolicy(policyName) + return p +} + func TestPlannedReparenter_preflightChecks(t *testing.T) { t.Parallel() @@ -791,7 +797,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 500, }, - durability: &durabilityNone{}, }, expectedIsNoop: false, expectedEvent: &events.Reparent{ @@ -819,7 +824,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 100, }, - durability: &durabilityNone{}, + durability: getDurabilityPolicy(policy.DurabilityNone), }, shouldErr: false, }, @@ -889,7 +894,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 500, }, - durability: &durabilityNone{}, }, expectedIsNoop: false, expectedEvent: &events.Reparent{ @@ -917,7 +921,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 101, }, - durability: &durabilityNone{}, + durability: getDurabilityPolicy(policy.DurabilityNone), }, shouldErr: false, }, @@ -1148,7 +1152,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 500, }, - durability: &durabilityCrossCell{}, + durability: getDurabilityPolicy(policy.DurabilityCrossCell), }, expectedIsNoop: true, expectedEvent: &events.Reparent{ @@ -1186,7 +1190,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.opts.durability == nil { - durability, err := GetDurabilityPolicy(DurabilityNone) + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) tt.opts.durability = durability } @@ -1799,7 +1803,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { ctx = _ctx } - durability, err := GetDurabilityPolicy(DurabilityNone) + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) tt.opts.durability = durability @@ -1946,7 +1950,7 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { ctx = _ctx } - durability, err := GetDurabilityPolicy(DurabilityNone) + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) pos, err := pr.performInitialPromotion( ctx, @@ -3423,7 +3427,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }{ { name: "success - durability = none", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -3490,7 +3494,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }, 
{ name: "success - durability = semi_sync", - durability: DurabilitySemiSync, + durability: policy.DurabilitySemiSync, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -3556,7 +3560,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { shouldErr: false, }, { name: "success - promote replica required", - durability: DurabilitySemiSync, + durability: policy.DurabilitySemiSync, promoteReplicaRequired: true, tmc: &testutil.TabletManagerClient{ PromoteReplicaResults: map[string]struct { @@ -3632,7 +3636,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { shouldErr: false, }, { name: "Promote replica failed", - durability: DurabilitySemiSync, + durability: policy.DurabilitySemiSync, promoteReplicaRequired: true, tmc: &testutil.TabletManagerClient{ PromoteReplicaResults: map[string]struct { @@ -3977,11 +3981,11 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { t.Parallel() pr := NewPlannedReparenter(nil, tt.tmc, logger) - durabilityPolicy := DurabilityNone + durabilityPolicy := policy.DurabilityNone if tt.durability != "" { durabilityPolicy = tt.durability } - durability, err := GetDurabilityPolicy(durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(durabilityPolicy) require.NoError(t, err) tt.opts.durability = durability err = pr.reparentTablets(ctx, tt.ev, tt.reparentJournalPosition, tt.promoteReplicaRequired, tt.tabletMap, tt.opts) diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/policy/durability.go similarity index 99% rename from go/vt/vtctl/reparentutil/durability.go rename to go/vt/vtctl/reparentutil/policy/durability.go index 31a110bdbd8..bad6846ef29 100644 --- a/go/vt/vtctl/reparentutil/durability.go +++ b/go/vt/vtctl/reparentutil/policy/durability.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package reparentutil +package policy import ( "fmt" diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/policy/durability_test.go similarity index 99% rename from go/vt/vtctl/reparentutil/durability_test.go rename to go/vt/vtctl/reparentutil/policy/durability_test.go index 52480362be6..441275f29bf 100644 --- a/go/vt/vtctl/reparentutil/durability_test.go +++ b/go/vt/vtctl/reparentutil/policy/durability_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package reparentutil +package policy import ( "testing" diff --git a/go/vt/vtctl/reparentutil/reparent_sorter.go b/go/vt/vtctl/reparentutil/reparent_sorter.go index ea7367bd36b..2f9c3c9ea8d 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter.go @@ -20,6 +20,7 @@ import ( "sort" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -32,11 +33,11 @@ type reparentSorter struct { tablets []*topodatapb.Tablet positions []replication.Position innodbBufferPool []int - durability Durabler + durability policy.Durabler } // newReparentSorter creates a new reparentSorter -func newReparentSorter(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability Durabler) *reparentSorter { +func newReparentSorter(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability policy.Durabler) *reparentSorter { return &reparentSorter{ tablets: tablets, positions: positions, @@ -82,8 +83,8 @@ func (rs *reparentSorter) Less(i, j int) bool { // at this point, both have the same GTIDs // so we check their promotion rules - jPromotionRule := PromotionRule(rs.durability, rs.tablets[j]) - iPromotionRule := PromotionRule(rs.durability, rs.tablets[i]) + jPromotionRule := policy.PromotionRule(rs.durability, rs.tablets[j]) + iPromotionRule := policy.PromotionRule(rs.durability, rs.tablets[i]) // If the promotion rules are different then we want to sort by the promotion rules. if len(rs.innodbBufferPool) != 0 && jPromotionRule == iPromotionRule { @@ -100,7 +101,7 @@ func (rs *reparentSorter) Less(i, j int) bool { // sortTabletsForReparent sorts the tablets, given their positions for emergency reparent shard and planned reparent shard. // Tablets are sorted first by their replication positions, with ties broken by the promotion rules. 
-func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability Durabler) error { +func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability policy.Durabler) error { // throw an error internal error in case of unequal number of tablets and positions // fail-safe code prevents panic in sorting in case the lengths are unequal if len(tablets) != len(positions) { diff --git a/go/vt/vtctl/reparentutil/reparent_sorter_test.go b/go/vt/vtctl/reparentutil/reparent_sorter_test.go index 3dfcdbb5228..86aa129f1a4 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter_test.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter_test.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/mysql/replication" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) // TestReparentSorter tests that the sorting for tablets works correctly @@ -135,7 +136,7 @@ func TestReparentSorter(t *testing.T) { }, } - durability, err := GetDurabilityPolicy(DurabilityNone) + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index 17dbaeae015..1e1c2b98369 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" ) @@ -153,12 +154,12 @@ func SetReplicationSource(ctx context.Context, ts *topo.Server, tmc tmclient.Tab return err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err } - isSemiSync := IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet) + isSemiSync := policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet) return tmc.SetReplicationSource(ctx, tablet, shardPrimary.Alias, 0, "", false, isSemiSync, 0) } @@ -183,7 +184,7 @@ func stopReplicationAndBuildStatusMaps( stopReplicationTimeout time.Duration, ignoredTablets sets.Set[string], tabletToWaitFor *topodatapb.TabletAlias, - durability Durabler, + durability policy.Durabler, waitForAllTablets bool, logger logutil.Logger, ) (*replicationSnapshot, error) { diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index 8f867101376..1f8e5d097b7 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" @@ -289,7 +290,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ { name: "success", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -358,7 +359,7 @@ func 
Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "success with wait for all tablets", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -428,7 +429,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "timing check with wait for all tablets", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -514,7 +515,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "success - 2 rdonly failures", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -608,7 +609,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "success - 1 rdonly and 1 replica failures", - durability: DurabilitySemiSync, + durability: policy.DurabilitySemiSync, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -702,7 +703,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "ignore tablets", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -762,7 +763,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "have PRIMARY tablet and can demote", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -841,7 +842,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "one tablet is PRIMARY and cannot demote", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -906,7 +907,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "multiple tablets are PRIMARY and cannot demote", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -959,7 +960,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "stopReplicasTimeout exceeded", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusDelays: map[string]time.Duration{ "zone1-0000000100": time.Minute, // zone1-0000000100 will timeout and not be included @@ -1023,7 +1024,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "one tablet fails to StopReplication", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus 
*replicationdatapb.StopReplicationStatus @@ -1080,7 +1081,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "multiple tablets fail StopReplication", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1121,7 +1122,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: true, }, { name: "1 tablets fail StopReplication and 1 has replication stopped", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1166,7 +1167,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "slow tablet is the new primary requested", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusDelays: map[string]time.Duration{ "zone1-0000000102": 1 * time.Second, // zone1-0000000102 is slow to respond but has to be included since it is the requested primary @@ -1268,7 +1269,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "Handle nil replication status After. No segfaulting when determining backup status, and fall back to Before status", - durability: DurabilityNone, + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1340,7 +1341,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durability) + durability, err := policy.GetDurabilityPolicy(tt.durability) require.NoError(t, err) startTime := time.Now() res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, tt.waitForAllTablets, logger) diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index c4c23e65c7e..4b8a4cbc431 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -345,9 +346,9 @@ func findCandidate( } // getTabletsWithPromotionRules gets the tablets with the given promotion rule from the list of tablets -func getTabletsWithPromotionRules(durability Durabler, tablets []*topodatapb.Tablet, rule promotionrule.CandidatePromotionRule) (res []*topodatapb.Tablet) { +func getTabletsWithPromotionRules(durability policy.Durabler, tablets []*topodatapb.Tablet, rule promotionrule.CandidatePromotionRule) (res []*topodatapb.Tablet) { for _, candidate := range tablets { - promotionRule := PromotionRule(durability, candidate) + promotionRule := policy.PromotionRule(durability, candidate) if promotionRule == rule { res = append(res, candidate) } diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index c754013fb4f..276bab2e443 100644 
--- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/utils" @@ -1014,7 +1015,7 @@ zone1-0000000100 is not a replica`, }, } - durability, err := GetDurabilityPolicy(DurabilityNone) + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1829,7 +1830,7 @@ func Test_getTabletsWithPromotionRules(t *testing.T) { filteredTablets: nil, }, } - durability, _ := GetDurabilityPolicy(DurabilityNone) + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { res := getTabletsWithPromotionRules(durability, tt.tablets, tt.rule) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 5438c2dd1c3..9ad64c3d4fd 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -100,7 +100,7 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/ptr" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/flagutil" @@ -1819,7 +1819,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags keyspaceType := subFlags.String("keyspace_type", "", "Specifies the type of the keyspace") baseKeyspace := subFlags.String("base_keyspace", "", "Specifies the base keyspace for a snapshot keyspace") timestampStr := subFlags.String("snapshot_time", "", "Specifies the snapshot time for this keyspace") - durabilityPolicy := subFlags.String("durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") + durabilityPolicy := subFlags.String("durability-policy", policy.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. 
Possible values include 'semi_sync' and others as dictated by registered plugins.") sidecarDBName := subFlags.String("sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") if err := subFlags.Parse(args); err != nil { return err @@ -1841,7 +1841,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags var snapshotTime *vttime.Time if ktype == topodatapb.KeyspaceType_SNAPSHOT { - if *durabilityPolicy != reparentutil.DurabilityNone { + if *durabilityPolicy != policy.DurabilityNone { return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "durability-policy cannot be specified while creating a snapshot keyspace") } if *baseKeyspace == "" { diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index a62bffe1178..4166ca3293b 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -29,7 +29,7 @@ import ( "vitess.io/vitess/go/vt/servenv/testutils" "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/wrangler" @@ -54,7 +54,7 @@ func TestAPI(t *testing.T) { defer server.Close() ks1 := &topodatapb.Keyspace{ - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, SidecarDbName: "_vt_sidecar_ks1", } diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go index 07830bf7dda..c1f84d6b3f2 100644 --- a/go/vt/vtorc/inst/analysis_dao.go +++ b/go/vt/vtorc/inst/analysis_dao.go @@ -30,7 +30,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/util" @@ -54,7 +54,7 @@ type clusterAnalysis struct { hasClusterwideAction bool totalTablets int primaryAlias string - durability reparentutil.Durabler + durability policy.Durabler } // GetReplicationAnalysis will check for replication problems (dead primary; unreachable primary; etc) @@ -388,7 +388,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna log.Errorf("ignoring keyspace %v because no durability_policy is set. Please set it using SetKeyspaceDurabilityPolicy", a.AnalyzedKeyspace) return nil } - durability, err := reparentutil.GetDurabilityPolicy(durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(durabilityPolicy) if err != nil { log.Errorf("can't get the durability policy %v - %v. 
Skipping keyspace - %v.", durabilityPolicy, err, a.AnalyzedKeyspace) return nil @@ -443,11 +443,11 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = PrimaryIsReadOnly a.Description = "Primary is read-only" // - } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) != 0 && !a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && policy.SemiSyncAckers(ca.durability, tablet) != 0 && !a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustBeSet a.Description = "Primary semi-sync must be set" // - } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && policy.SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustNotBeSet a.Description = "Primary semi-sync must not be set" // @@ -485,11 +485,11 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = ReplicationStopped a.Description = "Replication is stopped" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && policy.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustBeSet a.Description = "Replica semi-sync must be set" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !policy.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustNotBeSet a.Description = "Replica semi-sync must not be set" // diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go index ba11ab9e3f2..b0595fbac14 100644 --- a/go/vt/vtorc/inst/analysis_dao_test.go +++ b/go/vt/vtorc/inst/analysis_dao_test.go @@ -25,7 +25,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/test" ) @@ -71,7 +71,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -90,7 +90,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, ShardPrimaryTermTimestamp: "2022-12-28 07:23:25.129898+00:00", - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -108,7 +108,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 4, CountValidReplicas: 4, @@ -130,7 +130,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 0, IsPrimary: 1, @@ -150,7 +150,7 @@ func 
TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 3, IsPrimary: 1, @@ -170,7 +170,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 4, CountValidReplicas: 2, @@ -192,7 +192,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -213,7 +213,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -235,7 +235,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -257,7 +257,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -279,7 +279,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -316,7 +316,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -334,7 +334,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -356,7 +356,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -374,7 +374,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 102}, }, @@ -396,7 +396,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -414,7 +414,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: 
policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -437,7 +437,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -455,7 +455,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -478,7 +478,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -496,7 +496,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -521,7 +521,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -543,7 +543,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 0, @@ -563,7 +563,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -584,7 +584,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 1, @@ -606,7 +606,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { }, // Snapshot Keyspace KeyspaceType: 1, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -644,7 +644,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -664,7 +664,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, IsInvalid: 1, - DurabilityPolicy: reparentutil.DurabilitySemiSync, + DurabilityPolicy: policy.DurabilitySemiSync, }}, keyspaceWanted: "ks", shardWanted: "0", @@ -681,7 +681,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, IsInvalid: 1, }, { 
TabletInfo: &topodatapb.Tablet{ @@ -723,7 +723,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, IsInvalid: 1, }}, keyspaceWanted: "ks", @@ -741,7 +741,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -759,7 +759,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, ErrantGTID: "some errant GTID", PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, @@ -782,7 +782,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -800,7 +800,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: reparentutil.DurabilityNone, + DurabilityPolicy: policy.DurabilityNone, ErrantGTID: "some errant GTID", PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, diff --git a/go/vt/vtorc/inst/keyspace_dao.go b/go/vt/vtorc/inst/keyspace_dao.go index d764e3fc56a..4271886121e 100644 --- a/go/vt/vtorc/inst/keyspace_dao.go +++ b/go/vt/vtorc/inst/keyspace_dao.go @@ -22,7 +22,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/db" ) @@ -80,10 +80,10 @@ func SaveKeyspace(keyspace *topo.KeyspaceInfo) error { } // GetDurabilityPolicy gets the durability policy for the given keyspace. 
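VTOrc resolves a keyspace's durability policy once per analysis and then derives the expected semi-sync state from it, as the analysis_dao.go hunks above show. A rough, self-contained sketch of that decision flow; analyzeSemiSync and the sample tablets are illustrative, and the signatures are assumed from the calls in this patch:

package main

import (
	"fmt"

	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
)

// analyzeSemiSync sketches the checks analysis_dao.go performs through the
// policy package: the primary should have semi-sync enabled only if the policy
// requires ackers, and each replica should ack only if the policy says so.
func analyzeSemiSync(durabilityName string, primary *topodatapb.Tablet, replicas []*topodatapb.Tablet) error {
	durability, err := policy.GetDurabilityPolicy(durabilityName)
	if err != nil {
		return fmt.Errorf("can't get the durability policy %v: %w", durabilityName, err)
	}
	if policy.SemiSyncAckers(durability, primary) > 0 {
		fmt.Println("primary semi-sync must be set")
	} else {
		fmt.Println("primary semi-sync must not be set")
	}
	for _, replica := range replicas {
		fmt.Printf("replica %v semi-sync acks required: %v\n",
			replica.Alias, policy.IsReplicaSemiSync(durability, primary, replica))
	}
	return nil
}

func main() {
	primary := &topodatapb.Tablet{Alias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 101}, Type: topodatapb.TabletType_PRIMARY}
	replica := &topodatapb.Tablet{Alias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 102}, Type: topodatapb.TabletType_REPLICA}
	_ = analyzeSemiSync(policy.DurabilitySemiSync, primary, []*topodatapb.Tablet{replica})
}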
-func GetDurabilityPolicy(keyspace string) (reparentutil.Durabler, error) {
+func GetDurabilityPolicy(keyspace string) (policy.Durabler, error) {
 	ki, err := ReadKeyspace(keyspace)
 	if err != nil {
 		return nil, err
 	}
-	return reparentutil.GetDurabilityPolicy(ki.DurabilityPolicy)
+	return policy.GetDurabilityPolicy(ki.DurabilityPolicy)
 }
diff --git a/go/vt/vtorc/inst/keyspace_dao_test.go b/go/vt/vtorc/inst/keyspace_dao_test.go
index a9d99b2ac80..ef2dd67379e 100644
--- a/go/vt/vtorc/inst/keyspace_dao_test.go
+++ b/go/vt/vtorc/inst/keyspace_dao_test.go
@@ -24,7 +24,7 @@ import (
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
 	"vitess.io/vitess/go/vt/topo"
 	"vitess.io/vitess/go/vt/topotools"
-	"vitess.io/vitess/go/vt/vtctl/reparentutil"
+	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
 	"vitess.io/vitess/go/vt/vtorc/db"
 )
 
@@ -48,7 +48,7 @@ func TestSaveAndReadKeyspace(t *testing.T) {
 		keyspaceName: "ks1",
 		keyspace: &topodatapb.Keyspace{
 			KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-			DurabilityPolicy: reparentutil.DurabilitySemiSync,
+			DurabilityPolicy: policy.DurabilitySemiSync,
 		},
 		keyspaceWanted: nil,
 		semiSyncAckersWanted: 1,
@@ -72,12 +72,12 @@ func TestSaveAndReadKeyspace(t *testing.T) {
 		keyspaceName: "ks4",
 		keyspace: &topodatapb.Keyspace{
 			KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-			DurabilityPolicy: reparentutil.DurabilityNone,
+			DurabilityPolicy: policy.DurabilityNone,
 			BaseKeyspace: "baseKeyspace",
 		},
 		keyspaceWanted: &topodatapb.Keyspace{
 			KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-			DurabilityPolicy: reparentutil.DurabilityNone,
+			DurabilityPolicy: policy.DurabilityNone,
 		},
 		semiSyncAckersWanted: 0,
 	}, {
@@ -120,7 +120,7 @@ func TestSaveAndReadKeyspace(t *testing.T) {
 				return
 			}
 			require.NoError(t, err)
-			require.EqualValues(t, tt.semiSyncAckersWanted, reparentutil.SemiSyncAckers(durabilityPolicy, nil))
+			require.EqualValues(t, tt.semiSyncAckersWanted, policy.SemiSyncAckers(durabilityPolicy, nil))
 		})
 	}
 }
diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go
index 09d0535d132..8218af45db6 100644
--- a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go
+++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go
@@ -28,7 +28,7 @@ import (
 	"vitess.io/vitess/go/vt/topo"
 	"vitess.io/vitess/go/vt/topo/memorytopo"
 	"vitess.io/vitess/go/vt/topotools"
-	"vitess.io/vitess/go/vt/vtctl/reparentutil"
+	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
 	"vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil"
 	"vitess.io/vitess/go/vt/vtorc/db"
 	"vitess.io/vitess/go/vt/vtorc/inst"
@@ -37,15 +37,15 @@ import (
 var (
 	keyspaceDurabilityNone = &topodatapb.Keyspace{
 		KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-		DurabilityPolicy: reparentutil.DurabilityNone,
+		DurabilityPolicy: policy.DurabilityNone,
 	}
 	keyspaceDurabilitySemiSync = &topodatapb.Keyspace{
 		KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-		DurabilityPolicy: reparentutil.DurabilitySemiSync,
+		DurabilityPolicy: policy.DurabilitySemiSync,
 	}
 	keyspaceDurabilityTest = &topodatapb.Keyspace{
 		KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-		DurabilityPolicy: reparentutil.DurabilityTest,
+		DurabilityPolicy: policy.DurabilityTest,
 	}
 	keyspaceSnapshot = &topodatapb.Keyspace{
 		KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT,
@@ -107,7 +107,7 @@ func TestRefreshAllKeyspaces(t *testing.T) {
 	// Set clusters to watch to watch all keyspaces
 	clustersToWatch = nil
 	// Change the durability policy of ks1
-	reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", reparentutil.DurabilitySemiSync)
+	reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", policy.DurabilitySemiSync)
 	require.NoError(t, RefreshAllKeyspacesAndShards(context.Background()))
 
 	// Verify that all the keyspaces are correctly reloaded
@@ -145,7 +145,7 @@ func TestRefreshKeyspace(t *testing.T) {
 		keyspaceName: "ks1",
 		keyspace: &topodatapb.Keyspace{
 			KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-			DurabilityPolicy: reparentutil.DurabilitySemiSync,
+			DurabilityPolicy: policy.DurabilitySemiSync,
 		},
 		keyspaceWanted: nil,
 		err: "",
@@ -170,12 +170,12 @@ func TestRefreshKeyspace(t *testing.T) {
 		keyspaceName: "ks4",
 		keyspace: &topodatapb.Keyspace{
 			KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-			DurabilityPolicy: reparentutil.DurabilityNone,
+			DurabilityPolicy: policy.DurabilityNone,
 			BaseKeyspace: "baseKeyspace",
 		},
 		keyspaceWanted: &topodatapb.Keyspace{
 			KeyspaceType: topodatapb.KeyspaceType_NORMAL,
-			DurabilityPolicy: reparentutil.DurabilityNone,
+			DurabilityPolicy: policy.DurabilityNone,
 		},
 		err: "",
 	}, {
diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go
index f14eca624c9..0d0bbff5b53 100644
--- a/go/vt/vtorc/logic/topology_recovery.go
+++ b/go/vt/vtorc/logic/topology_recovery.go
@@ -29,6 +29,7 @@ import (
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
 	"vitess.io/vitess/go/vt/topo/topoproto"
 	"vitess.io/vitess/go/vt/vtctl/reparentutil"
+	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
 	"vitess.io/vitess/go/vt/vtorc/config"
 	"vitess.io/vitess/go/vt/vtorc/inst"
 	"vitess.io/vitess/go/vt/vtorc/util"
@@ -739,7 +740,7 @@ func fixPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (r
 		return false, topologyRecovery, err
 	}
 
-	if err := tabletUndoDemotePrimary(ctx, analyzedTablet, reparentutil.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil {
+	if err := tabletUndoDemotePrimary(ctx, analyzedTablet, policy.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil {
 		return true, topologyRecovery, err
 	}
 	return true, topologyRecovery, nil
@@ -782,7 +783,7 @@ func fixReplica(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (r
 		return true, topologyRecovery, err
 	}
 
-	err = setReplicationSource(ctx, analyzedTablet, primaryTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet), float64(analysisEntry.ReplicaNetTimeout)/2)
+	err = setReplicationSource(ctx, analyzedTablet, primaryTablet, policy.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet), float64(analysisEntry.ReplicaNetTimeout)/2)
 	return true, topologyRecovery, err
 }
 
@@ -817,6 +818,6 @@ func recoverErrantGTIDDetected(ctx context.Context, analysisEntry *inst.Replicat
 		return false, topologyRecovery, err
 	}
 
-	err = changeTabletType(ctx, analyzedTablet, topodatapb.TabletType_DRAINED, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet))
+	err = changeTabletType(ctx, analyzedTablet, topodatapb.TabletType_DRAINED, policy.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet))
 	return true, topologyRecovery, err
 }
diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go
index 22fe72716dd..9f906317edf 100644
--- a/go/vt/vttablet/tabletmanager/rpc_backup.go
+++ b/go/vt/vttablet/tabletmanager/rpc_backup.go
@@ -22,7 +22,7 @@ import (
 	"time"
 
 	"vitess.io/vitess/go/vt/topotools"
-	"vitess.io/vitess/go/vt/vtctl/reparentutil"
+	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
 
 	"vitess.io/vitess/go/vt/logutil"
 	"vitess.io/vitess/go/vt/mysqlctl"
@@ -136,12 +136,12 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req
 			l.Errorf("Failed to get durability policy, error: %v", err)
 			return
 		}
-		durability, err := reparentutil.GetDurabilityPolicy(durabilityName)
+		durability, err := policy.GetDurabilityPolicy(durabilityName)
 		if err != nil {
 			l.Errorf("Failed to get durability with name %v, error: %v", durabilityName, err)
 		}
 
-		isSemiSync := reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tabletInfo.Tablet)
+		isSemiSync := policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tabletInfo.Tablet)
 		semiSyncAction, err := tm.convertBoolToSemiSyncAction(bgCtx, isSemiSync)
 		if err != nil {
 			l.Errorf("Failed to convert bool to semisync action, error: %v", err)
diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go
index 84150c82be8..fbef04de357 100644
--- a/go/vt/vttablet/tabletmanager/tm_init.go
+++ b/go/vt/vttablet/tabletmanager/tm_init.go
@@ -70,7 +70,7 @@ import (
 	"vitess.io/vitess/go/vt/topo"
 	"vitess.io/vitess/go/vt/topo/topoproto"
 	"vitess.io/vitess/go/vt/topotools"
-	"vitess.io/vitess/go/vt/vtctl/reparentutil"
+	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
 	"vitess.io/vitess/go/vt/vtenv"
 	"vitess.io/vitess/go/vt/vterrors"
 	"vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
@@ -1011,7 +1011,7 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t
 		return "", vterrors.Wrapf(err, "cannot read keyspace durability policy %v", tablet.Keyspace)
 	}
 	log.Infof("Getting a new durability policy for %v", durabilityName)
-	durability, err := reparentutil.GetDurabilityPolicy(durabilityName)
+	durability, err := policy.GetDurabilityPolicy(durabilityName)
 	if err != nil {
 		return "", vterrors.Wrapf(err, "cannot get durability policy %v", durabilityName)
 	}
@@ -1020,7 +1020,7 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t
 
 	tablet.Type = tabletType
 
-	semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet))
+	semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, policy.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet))
 	if err != nil {
 		return "", err
 	}
diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go
index 1a3a45cf99b..e17f56de11f 100644
--- a/go/vt/wrangler/reparent.go
+++ b/go/vt/wrangler/reparent.go
@@ -31,6 +31,7 @@ import (
 	"vitess.io/vitess/go/vt/topotools/events"
 	"vitess.io/vitess/go/vt/vtctl/grpcvtctldserver"
 	"vitess.io/vitess/go/vt/vtctl/reparentutil"
+	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
 
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
 	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
@@ -131,7 +132,7 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newPrimaryAl
 		return err
 	}
 	log.Infof("Getting a new durability policy for %v", durabilityName)
-	durability, err := reparentutil.GetDurabilityPolicy(durabilityName)
+	durability, err := policy.GetDurabilityPolicy(durabilityName)
 	if err != nil {
 		return err
 	}
@@ -152,7 +153,7 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newPrimaryAl
 	}()
 	event.DispatchUpdate(ev, "starting external reparent")
 
-	if err := wr.tmc.ChangeType(ctx, tablet, topodatapb.TabletType_PRIMARY, reparentutil.SemiSyncAckers(durability, tablet) > 0); err != nil {
+	if err := wr.tmc.ChangeType(ctx, tablet, topodatapb.TabletType_PRIMARY, policy.SemiSyncAckers(durability, tablet) > 0); err != nil {
 		log.Warningf("Error calling ChangeType on new primary %v: %v", topoproto.TabletAliasString(newPrimaryAlias), err)
%v: %v", topoproto.TabletAliasString(newPrimaryAlias), err) return err } diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index fdc6f9a92ac..31a5a7936ad 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -141,12 +142,12 @@ func (wr *Wrangler) shouldSendSemiSyncAck(ctx context.Context, tablet *topodatap if err != nil { return false, err } - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return false, err } - return reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet), nil + return policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet), nil } func (wr *Wrangler) getShardPrimaryForTablet(ctx context.Context, tablet *topodatapb.Tablet) (*topo.TabletInfo, error) { diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 548b86a1f72..984ff93095e 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -33,6 +33,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -60,7 +61,7 @@ func TestEmergencyReparentShard(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) oldPrimary.FakeMysqlDaemon.Replicating = false oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{ @@ -211,7 +212,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) moreAdvancedReplica := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.Replicating = true diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index feff3919685..f160ddfa32b 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -24,7 +24,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" 
"vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/assert" @@ -61,7 +61,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell2", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -178,7 +178,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -313,7 +313,7 @@ func TestPlannedReparentInitialization(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -692,7 +692,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { // Create a primary, a couple good replicas primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // old primary primary.FakeMysqlDaemon.ReadOnly = false diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 35ae15a960f..7012822a017 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -141,7 +142,7 @@ func TestReparentTablet(t *testing.T) { } primary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_PRIMARY, nil) replica := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync) + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", 
 
 	// mark the primary inside the shard
 	if _, err := ts.UpdateShardFields(ctx, "test_keyspace", "0", func(si *topo.ShardInfo) error {
@@ -197,7 +198,7 @@ func TestSetReplicationSource(t *testing.T) {
 	require.NoError(t, err, "CreateShard failed")
 
 	primary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_PRIMARY, nil)
-	reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", reparentutil.DurabilitySemiSync)
+	reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync)
 
 	// mark the primary inside the shard
 	_, err = ts.UpdateShardFields(ctx, "test_keyspace", "0", func(si *topo.ShardInfo) error {