From 35e442305108eac3431c38f7de50ea0573d8cc32 Mon Sep 17 00:00:00 2001 From: Raphael 'kena' Poss Date: Sat, 29 Jul 2023 18:28:18 +0200 Subject: [PATCH 1/4] testserver: remove `(*TestServer).Gossip()` Tests should use `.GossipI()` Release note: None --- pkg/cli/node_test.go | 16 ++--- .../kvcoord/dist_sender_server_test.go | 21 ++++--- pkg/kv/kvserver/client_merge_test.go | 3 +- pkg/kv/kvserver/gossip_test.go | 37 +++++------ pkg/kv/kvserver/node_liveness_test.go | 61 ++++++++++--------- pkg/server/storage_api/nodes_test.go | 32 +++++----- pkg/server/testserver.go | 11 +--- pkg/sql/physicalplan/span_resolver_test.go | 2 +- pkg/sql/txn_restart_test.go | 5 +- pkg/testutils/testcluster/testcluster.go | 4 +- 10 files changed, 90 insertions(+), 102 deletions(-) diff --git a/pkg/cli/node_test.go b/pkg/cli/node_test.go index f84ba0e80c2..faa8ae59466 100644 --- a/pkg/cli/node_test.go +++ b/pkg/cli/node_test.go @@ -161,25 +161,19 @@ func checkNodeStatus(t *testing.T, c TestCLI, output string, start time.Time) { t.Fatalf("%s", err) } - nodeID := c.Gossip().NodeID.Get() + nodeID := c.NodeID() nodeIDStr := strconv.FormatInt(int64(nodeID), 10) if a, e := fields[0], nodeIDStr; a != e { t.Errorf("node id (%s) != expected (%s)", a, e) } - nodeAddr, err := c.Gossip().GetNodeIDAddress(nodeID) - if err != nil { - t.Fatal(err) - } - if a, e := fields[1], nodeAddr.String(); a != e { + nodeAddr := c.AdvRPCAddr() + if a, e := fields[1], nodeAddr; a != e { t.Errorf("node address (%s) != expected (%s)", a, e) } - nodeSQLAddr, err := c.Gossip().GetNodeIDSQLAddress(nodeID) - if err != nil { - t.Fatal(err) - } - if a, e := fields[2], nodeSQLAddr.String(); a != e { + nodeSQLAddr := c.AdvSQLAddr() + if a, e := fields[2], nodeSQLAddr; a != e { t.Errorf("node SQL address (%s) != expected (%s)", a, e) } diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go index cb8a5f91490..81e633e0d63 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go @@ -87,14 +87,15 @@ func TestRangeLookupWithOpenTransaction(t *testing.T) { // Create a new DistSender and client.DB so that the Get below is guaranteed // to not hit in the range descriptor cache forcing a RangeLookup operation. ambient := s.AmbientCtx() + gs := s.GossipI().(*gossip.Gossip) ds := kvcoord.NewDistSender(kvcoord.DistSenderConfig{ AmbientCtx: ambient, Settings: cluster.MakeTestingClusterSettings(), Clock: s.Clock(), - NodeDescs: s.Gossip(), + NodeDescs: gs, RPCContext: s.RPCContext(), - NodeDialer: nodedialer.New(s.RPCContext(), gossip.AddressResolver(s.Gossip())), - FirstRangeProvider: s.Gossip(), + NodeDialer: nodedialer.New(s.RPCContext(), gossip.AddressResolver(gs)), + FirstRangeProvider: gs, }) tsf := kvcoord.NewTxnCoordSenderFactory( kvcoord.TxnCoordSenderFactoryConfig{ @@ -1120,16 +1121,17 @@ func TestMultiRangeScanReverseScanInconsistent(t *testing.T) { // applied by time we execute the scan. If it has not run, then try the // scan again. READ_UNCOMMITTED and INCONSISTENT reads to not push // intents. 
+ gs := s.GossipI().(*gossip.Gossip) testutils.SucceedsSoon(t, func() error { clock := hlc.NewClockForTesting(timeutil.NewManualTime(ts.GoTime().Add(1))) ds := kvcoord.NewDistSender(kvcoord.DistSenderConfig{ AmbientCtx: s.AmbientCtx(), Settings: s.ClusterSettings(), Clock: clock, - NodeDescs: s.Gossip(), + NodeDescs: gs, RPCContext: s.RPCContext(), - NodeDialer: nodedialer.New(s.RPCContext(), gossip.AddressResolver(s.Gossip())), - FirstRangeProvider: s.Gossip(), + NodeDialer: nodedialer.New(s.RPCContext(), gossip.AddressResolver(gs)), + FirstRangeProvider: gs, }) reply, err := kv.SendWrappedWith(ctx, ds, kvpb.Header{ReadConsistency: rc}, request) @@ -1650,14 +1652,15 @@ func TestBatchPutWithConcurrentSplit(t *testing.T) { // Now, split further at the given keys, but use a new dist sender so // we don't update the caches on the default dist sender-backed client. + gs := s.GossipI().(*gossip.Gossip) ds := kvcoord.NewDistSender(kvcoord.DistSenderConfig{ AmbientCtx: s.AmbientCtx(), Clock: s.Clock(), - NodeDescs: s.Gossip(), + NodeDescs: gs, RPCContext: s.RPCContext(), - NodeDialer: nodedialer.New(s.RPCContext(), gossip.AddressResolver(s.Gossip())), + NodeDialer: nodedialer.New(s.RPCContext(), gossip.AddressResolver(gs)), Settings: cluster.MakeTestingClusterSettings(), - FirstRangeProvider: s.Gossip(), + FirstRangeProvider: gs, }) for _, key := range []string{"c"} { req := &kvpb.AdminSplitRequest{ diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index c5f94e4b0c1..35946d3b0f2 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -2473,7 +2473,8 @@ func TestStoreReplicaGCAfterMerge(t *testing.T) { tc.Servers[0].AmbientCtx(), cluster.MakeTestingClusterSettings(), tc.Servers[0].AmbientCtx().Tracer, - nodedialer.New(tc.Servers[0].RPCContext(), gossip.AddressResolver(tc.Servers[0].Gossip())), + nodedialer.New(tc.Servers[0].RPCContext(), + gossip.AddressResolver(tc.Servers[0].GossipI().(*gossip.Gossip))), nil, /* grpcServer */ tc.Servers[0].Stopper(), kvflowdispatch.NewDummyDispatch(), diff --git a/pkg/kv/kvserver/gossip_test.go b/pkg/kv/kvserver/gossip_test.go index 8337d5d1331..d7c05794d77 100644 --- a/pkg/kv/kvserver/gossip_test.go +++ b/pkg/kv/kvserver/gossip_test.go @@ -42,25 +42,26 @@ func TestGossipFirstRange(t *testing.T) { errors := make(chan error, 1) descs := make(chan *roachpb.RangeDescriptor) - unregister := tc.Servers[0].Gossip().RegisterCallback(gossip.KeyFirstRangeDescriptor, - func(_ string, content roachpb.Value) { - var desc roachpb.RangeDescriptor - if err := content.GetProto(&desc); err != nil { - select { - case errors <- err: - default: + unregister := tc.Servers[0].GossipI().(*gossip.Gossip). + RegisterCallback(gossip.KeyFirstRangeDescriptor, + func(_ string, content roachpb.Value) { + var desc roachpb.RangeDescriptor + if err := content.GetProto(&desc); err != nil { + select { + case errors <- err: + default: + } + } else { + select { + case descs <- &desc: + case <-time.After(45 * time.Second): + t.Logf("had to drop descriptor %+v", desc) + } } - } else { - select { - case descs <- &desc: - case <-time.After(45 * time.Second): - t.Logf("had to drop descriptor %+v", desc) - } - } - }, - // Redundant callbacks are required by this test. - gossip.Redundant, - ) + }, + // Redundant callbacks are required by this test. + gossip.Redundant, + ) // Unregister the callback before attempting to stop the stopper to prevent // deadlock. 
This is still flaky in theory since a callback can fire between // the last read from the channels and this unregister, but testing has diff --git a/pkg/kv/kvserver/node_liveness_test.go b/pkg/kv/kvserver/node_liveness_test.go index b93e3a9cfc2..3682e23c659 100644 --- a/pkg/kv/kvserver/node_liveness_test.go +++ b/pkg/kv/kvserver/node_liveness_test.go @@ -55,14 +55,14 @@ func verifyLiveness(t *testing.T, tc *testcluster.TestCluster) { return nil }) } -func verifyLivenessServer(s *server.TestServer, numServers int64) error { +func verifyLivenessServer(s serverutils.TestServerInterface, numServers int64) error { nl := s.NodeLiveness().(*liveness.NodeLiveness) - if !nl.GetNodeVitalityFromCache(s.Gossip().NodeID.Get()).IsLive(livenesspb.IsAliveNotification) { - return errors.Errorf("node %d not live", s.Gossip().NodeID.Get()) + if !nl.GetNodeVitalityFromCache(s.NodeID()).IsLive(livenesspb.IsAliveNotification) { + return errors.Errorf("node %d not live", s.NodeID()) } if a, e := nl.Metrics().LiveNodes.Value(), numServers; a != e { return errors.Errorf("expected node %d's LiveNodes metric to be %d; got %d", - s.Gossip().NodeID.Get(), e, a) + s.NodeID(), e, a) } return nil } @@ -106,7 +106,7 @@ func TestNodeLiveness(t *testing.T) { for _, s := range tc.Servers { nl := s.NodeLiveness().(*liveness.NodeLiveness) - nodeID := s.Gossip().NodeID.Get() + nodeID := s.NodeID() if nl.GetNodeVitalityFromCache(nodeID).IsLive(livenesspb.IsAliveNotification) { t.Errorf("expected node %d to be considered not-live after advancing node clock", nodeID) } @@ -180,7 +180,7 @@ func TestNodeLivenessInitialIncrement(t *testing.T) { // Verify liveness of all nodes for all nodes. verifyLiveness(t, tc) - nl, ok := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Servers[0].Gossip().NodeID.Get()) + nl, ok := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Servers[0].NodeID()) assert.True(t, ok) if nl.Epoch != 1 { t.Errorf("expected epoch to be set to 1 initially; got %d", nl.Epoch) @@ -193,7 +193,7 @@ func TestNodeLivenessInitialIncrement(t *testing.T) { func verifyEpochIncremented(t *testing.T, tc *testcluster.TestCluster, nodeIdx int) { testutils.SucceedsSoon(t, func() error { - liv, ok := tc.Servers[nodeIdx].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Servers[nodeIdx].Gossip().NodeID.Get()) + liv, ok := tc.Servers[nodeIdx].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Servers[nodeIdx].NodeID()) if !ok { return errors.New("liveness not found") } @@ -339,7 +339,7 @@ func TestNodeIsLiveCallback(t *testing.T) { cbMu.Lock() defer cbMu.Unlock() for _, s := range tc.Servers { - nodeID := s.Gossip().NodeID.Get() + nodeID := s.NodeID() if _, ok := cbs[nodeID]; !ok { return errors.Errorf("expected IsLive callback for node %d", nodeID) } @@ -405,7 +405,7 @@ func TestNodeLivenessEpochIncrement(t *testing.T) { pauseNodeLivenessHeartbeatLoops(tc) // First try to increment the epoch of a known-live node. - deadNodeID := tc.Servers[1].Gossip().NodeID.Get() + deadNodeID := tc.Servers[1].NodeID() oldLiveness, ok := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(deadNodeID) assert.True(t, ok) if err := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch( @@ -502,10 +502,11 @@ func TestNodeLivenessRestart(t *testing.T) { // seeing the liveness record properly gossiped at store startup. 
var expKeys []string for _, s := range tc.Servers { - nodeID := s.Gossip().NodeID.Get() + nodeID := s.NodeID() key := gossip.MakeNodeLivenessKey(nodeID) expKeys = append(expKeys, key) - if err := s.Gossip().AddInfoProto(key, &livenesspb.Liveness{NodeID: nodeID}, 0); err != nil { + if err := s.GossipI().(*gossip.Gossip). + AddInfoProto(key, &livenesspb.Liveness{NodeID: nodeID}, 0); err != nil { t.Fatal(err) } } @@ -521,16 +522,17 @@ func TestNodeLivenessRestart(t *testing.T) { // Restart store and verify gossip contains liveness record for nodes 1&2. require.NoError(t, tc.RestartServerWithInspect(1, func(s *server.TestServer) { livenessRegex := gossip.MakePrefixPattern(gossip.KeyNodeLivenessPrefix) - s.Gossip().RegisterCallback(livenessRegex, func(key string, _ roachpb.Value) { - keysMu.Lock() - defer keysMu.Unlock() - for _, k := range keysMu.keys { - if k == key { - return + s.GossipI().(*gossip.Gossip). + RegisterCallback(livenessRegex, func(key string, _ roachpb.Value) { + keysMu.Lock() + defer keysMu.Unlock() + for _, k := range keysMu.keys { + if k == key { + return + } } - } - keysMu.keys = append(keysMu.keys, key) - }) + keysMu.keys = append(keysMu.keys, key) + }) })) testutils.SucceedsSoon(t, func() error { keysMu.Lock() @@ -555,10 +557,9 @@ func TestNodeLivenessSelf(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - s := serv.(*server.TestServer) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - g := s.Gossip() + g := s.GossipI().(*gossip.Gossip) nl := s.NodeLiveness().(*liveness.NodeLiveness) nl.PauseHeartbeatLoopForTest() @@ -642,7 +643,7 @@ func TestNodeLivenessGetIsLiveMap(t *testing.T) { manualClock.Increment(nl.TestingGetLivenessThreshold().Nanoseconds() + 1) var livenessRec liveness.Record testutils.SucceedsSoon(t, func() error { - lr, ok := nl.GetLiveness(tc.Servers[0].Gossip().NodeID.Get()) + lr, ok := nl.GetLiveness(tc.Servers[0].NodeID()) if !ok { return errors.New("liveness not found") } @@ -709,7 +710,7 @@ func TestNodeLivenessGetLivenesses(t *testing.T) { manualClock.Increment(nl.TestingGetLivenessThreshold().Nanoseconds() + 1) var livenessRecord liveness.Record testutils.SucceedsSoon(t, func() error { - livenessRec, ok := nl.GetLiveness(tc.Servers[0].Gossip().NodeID.Get()) + livenessRec, ok := nl.GetLiveness(tc.Servers[0].NodeID()) if !ok { return errors.New("liveness not found") } @@ -812,7 +813,7 @@ func TestNodeLivenessConcurrentIncrementEpochs(t *testing.T) { // Advance the clock and this time increment epoch concurrently for node 1. nl := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) manualClock.Increment(nl.TestingGetLivenessThreshold().Nanoseconds() + 1) - l, ok := nl.GetLiveness(tc.Servers[1].Gossip().NodeID.Get()) + l, ok := nl.GetLiveness(tc.Servers[1].NodeID()) assert.True(t, ok) errCh := make(chan error, concurrency) for i := 0; i < concurrency; i++ { @@ -870,7 +871,7 @@ func TestNodeLivenessSetDraining(t *testing.T) { verifyLiveness(t, tc) drainingNodeIdx := 0 - drainingNodeID := tc.Servers[0].Gossip().NodeID.Get() + drainingNodeID := tc.Servers[0].NodeID() nodeIDAppearsInStoreList := func(id roachpb.NodeID, sl storepool.StoreList) bool { for _, store := range sl.TestingStores() { @@ -902,7 +903,7 @@ func TestNodeLivenessSetDraining(t *testing.T) { // been gossiped to the rest of the cluster. 
testutils.SucceedsSoon(t, func() error { for i, s := range tc.Servers { - curNodeID := s.Gossip().NodeID.Get() + curNodeID := s.NodeID() sl, alive, _ := tc.GetFirstStoreFromServer(t, i).GetStoreConfig().StorePool.TestingGetStoreList() if alive != expectedLive { return errors.Errorf( @@ -936,7 +937,7 @@ func TestNodeLivenessSetDraining(t *testing.T) { // been gossiped to the rest of the cluster. testutils.SucceedsSoon(t, func() error { for i, s := range tc.Servers { - curNodeID := s.Gossip().NodeID.Get() + curNodeID := s.NodeID() sl, alive, _ := tc.GetFirstStoreFromServer(t, i).GetStoreConfig().StorePool.TestingGetStoreList() if alive != expectedLive { return errors.Errorf( @@ -1204,7 +1205,7 @@ func testNodeLivenessSetDecommissioning(t *testing.T, decommissionNodeIdx int) { verifyLiveness(t, tc) callerNodeLiveness := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) - nodeID := tc.Servers[decommissionNodeIdx].Gossip().NodeID.Get() + nodeID := tc.Servers[decommissionNodeIdx].NodeID() // Verify success on failed update of a liveness record that already has the // given decommissioning setting. diff --git a/pkg/server/storage_api/nodes_test.go b/pkg/server/storage_api/nodes_test.go index afd2267dea5..547285b3762 100644 --- a/pkg/server/storage_api/nodes_test.go +++ b/pkg/server/storage_api/nodes_test.go @@ -36,19 +36,15 @@ import ( func TestStatusJson(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - ts := s.(*server.TestServer) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, + }) + defer srv.Stopper().Stop(context.Background()) + s := srv.SystemLayer() - nodeID := ts.Gossip().NodeID.Get() - addr, err := ts.Gossip().GetNodeIDAddress(nodeID) - if err != nil { - t.Fatal(err) - } - sqlAddr, err := ts.Gossip().GetNodeIDSQLAddress(nodeID) - if err != nil { - t.Fatal(err) - } + nodeID := srv.StorageLayer().NodeID() + addr := s.AdvRPCAddr() + sqlAddr := s.AdvSQLAddr() var nodes serverpb.NodesResponse testutils.SucceedsSoon(t, func() error { @@ -73,10 +69,10 @@ func TestStatusJson(t *testing.T) { if a, e := details.NodeID, nodeID; a != e { t.Errorf("expected: %d, got: %d", e, a) } - if a, e := details.Address, *addr; a != e { + if a, e := details.Address.String(), addr; a != e { t.Errorf("expected: %v, got: %v", e, a) } - if a, e := details.SQLAddress, *sqlAddr; a != e { + if a, e := details.SQLAddress.String(), sqlAddr; a != e { t.Errorf("expected: %v, got: %v", e, a) } if a, e := details.BuildInfo, build.GetInfo(); a != e { @@ -172,12 +168,14 @@ func TestMetricsRecording(t *testing.T) { func TestMetricsEndpoint(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, + }) defer srv.Stopper().Stop(context.Background()) - s := srv.(*server.TestServer) + s := srv.ApplicationLayer() - if _, err := srvtestutils.GetText(s, s.AdminURL().WithPath(apiconstants.StatusPrefix+"metrics/"+s.Gossip().NodeID.String()).String()); err != nil { + if _, err := srvtestutils.GetText(s, s.AdminURL().WithPath(apiconstants.StatusPrefix+"metrics/"+srv.NodeID().String()).String()); err != nil { t.Fatal(err) } } diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 
376507b2d8b..b1b802077b5 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -29,7 +29,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" @@ -382,15 +381,7 @@ func (ts *TestServer) Stopper() *stop.Stopper { // GossipI is part of the serverutils.StorageLayerInterface. func (ts *TestServer) GossipI() interface{} { - return ts.Gossip() -} - -// Gossip is like GossipI but returns the real type instead of interface{}. -func (ts *TestServer) Gossip() *gossip.Gossip { - if ts != nil { - return ts.gossip - } - return nil + return ts.Server.gossip } // RangeFeedFactory is part of serverutils.ApplicationLayerInterface. diff --git a/pkg/sql/physicalplan/span_resolver_test.go b/pkg/sql/physicalplan/span_resolver_test.go index 3f3feb2fdc8..c24589dde5d 100644 --- a/pkg/sql/physicalplan/span_resolver_test.go +++ b/pkg/sql/physicalplan/span_resolver_test.go @@ -89,7 +89,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { lr := physicalplan.NewSpanResolver( s3.Cfg.Settings, s3.DistSenderI().(*kvcoord.DistSender), - s3.Gossip(), + s3.GossipI().(*gossip.Gossip), s3.GetNode().Descriptor.NodeID, s3.GetNode().Descriptor.Locality, s3.Clock(), diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index b7dcbed6ea8..7c327562eeb 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -32,7 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -1181,7 +1180,7 @@ func TestReacquireLeaseOnRestart(t *testing.T) { txn := ba.Txn txn.ResetObservedTimestamps() now := s.Clock().NowAsClockTimestamp() - txn.UpdateObservedTimestamp(s.(*server.TestServer).Gossip().NodeID.Get(), now) + txn.UpdateObservedTimestamp(s.NodeID(), now) return kvpb.NewErrorWithTxn(kvpb.NewReadWithinUncertaintyIntervalError(now.ToTimestamp(), now, txn, now.ToTimestamp(), now), txn) } } @@ -1265,7 +1264,7 @@ func TestFlushUncommitedDescriptorCacheOnRestart(t *testing.T) { txn := args.Hdr.Txn txn.ResetObservedTimestamps() now := s.Clock().NowAsClockTimestamp() - txn.UpdateObservedTimestamp(s.(*server.TestServer).Gossip().NodeID.Get(), now) + txn.UpdateObservedTimestamp(s.NodeID(), now) return kvpb.NewErrorWithTxn(kvpb.NewReadWithinUncertaintyIntervalError(now.ToTimestamp(), now, txn, now.ToTimestamp(), now), txn) } } diff --git a/pkg/testutils/testcluster/testcluster.go b/pkg/testutils/testcluster/testcluster.go index 3420360ac54..fbbcc8b967e 100644 --- a/pkg/testutils/testcluster/testcluster.go +++ b/pkg/testutils/testcluster/testcluster.go @@ -413,7 +413,7 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { // We want to wait for stores for each server in order to have predictable // store IDs. Otherwise, stores can be asynchronously bootstrapped in an // unexpected order (#22342). 
- tc.WaitForNStores(t, i+1, tc.Servers[0].Gossip()) + tc.WaitForNStores(t, i+1, tc.Servers[0].GossipI().(*gossip.Gossip)) } } @@ -428,7 +428,7 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { } } - tc.WaitForNStores(t, tc.NumServers(), tc.Servers[0].Gossip()) + tc.WaitForNStores(t, tc.NumServers(), tc.Servers[0].GossipI().(*gossip.Gossip)) } // Now that we have started all the servers on the bootstrap version, let us From fc7eceea3bc8e811cae9bdd9794d7f56f1a30baa Mon Sep 17 00:00:00 2001 From: Raphael 'kena' Poss Date: Sat, 29 Jul 2023 18:28:56 +0200 Subject: [PATCH 2/4] tests: remove direct cast to `*TestServer` We want to keep tests using go interfaces, to ensure the test server APIs can be mocked if needed. Incidentally, this patch improves the APIs as follows: - the health probe for secondary tenant servers now properly includes the draining state of the RPC interface (previously, only the SQL draining state was included). - `ApplicationLayerInterface` now includes `Readiness()` and `DefaultZoneConfig()`. - `StorageLayerInterface` now includes `ScratchRangeWithExpirationLease()`, `GetRangeLease()`, `TsDB()`, `Locality()` and `DefaultSystemZoneConfig()`. Release note: None --- .github/CODEOWNERS | 1 + pkg/BUILD.bazel | 1 + pkg/bench/BUILD.bazel | 1 - pkg/bench/foreachdb.go | 3 +- pkg/ccl/backupccl/backup_tenant_test.go | 3 +- pkg/ccl/backupccl/backup_test.go | 15 +- .../changefeedccl/alter_changefeed_test.go | 7 +- pkg/ccl/changefeedccl/changefeed_test.go | 7 +- pkg/ccl/changefeedccl/helpers_test.go | 2 +- .../scheduled_changefeed_test.go | 6 +- .../kvfollowerreadsccl/followerreads_test.go | 6 +- pkg/ccl/multiregionccl/BUILD.bazel | 1 + .../multiregionccl/cold_start_latency_test.go | 3 +- pkg/ccl/multiregionccl/region_util_test.go | 2 +- pkg/ccl/partitionccl/zone_test.go | 11 +- pkg/ccl/serverccl/admin_test.go | 3 +- pkg/ccl/serverccl/server_controller_test.go | 4 +- pkg/ccl/serverccl/tenant_vars_test.go | 3 +- pkg/ccl/sqlproxyccl/proxy_handler_test.go | 14 +- pkg/ccl/testccl/authccl/auth_test.go | 3 +- pkg/cli/clierror/syntax_error_test.go | 2 +- pkg/cli/clisqlclient/conn_test.go | 6 +- pkg/cli/clisqlexec/run_query_test.go | 4 +- pkg/cli/clisqlshell/describe_test.go | 2 +- pkg/cli/clisqlshell/sql_test.go | 2 +- pkg/cli/debug_job_trace_test.go | 6 +- pkg/cli/debug_recover_loss_of_quorum_test.go | 4 +- pkg/cli/democluster/BUILD.bazel | 2 +- pkg/cli/democluster/demo_cluster.go | 74 ++++--- pkg/cli/democluster/session_persistence.go | 2 +- pkg/cli/import_test.go | 4 +- pkg/cli/node_test.go | 10 +- pkg/cli/nodelocal_test.go | 2 +- pkg/cli/testutils.go | 51 +++-- pkg/cli/userfiletable_test.go | 17 +- pkg/cli/zip_test.go | 16 +- .../kvcoord/dist_sender_server_test.go | 12 +- .../batcheval/cmd_add_sstable_test.go | 6 +- .../batcheval/cmd_delete_range_gchint_test.go | 10 +- .../knobs_use_range_tombstones_test.go | 6 +- .../client_atomic_membership_change_test.go | 2 +- pkg/kv/kvserver/client_decommission_test.go | 3 +- pkg/kv/kvserver/client_lease_test.go | 10 +- pkg/kv/kvserver/client_merge_test.go | 69 +++--- pkg/kv/kvserver/client_metrics_test.go | 5 +- pkg/kv/kvserver/client_migration_test.go | 10 +- pkg/kv/kvserver/client_mvcc_gc_test.go | 6 +- pkg/kv/kvserver/client_raft_helpers_test.go | 14 +- pkg/kv/kvserver/client_raft_test.go | 34 +-- pkg/kv/kvserver/client_relocate_range_test.go | 6 +- .../client_replica_circuit_breaker_test.go | 8 +- pkg/kv/kvserver/client_replica_gc_test.go | 4 +- pkg/kv/kvserver/client_replica_test.go | 97 ++++----- pkg/kv/kvserver/client_split_test.go 
| 190 ++++++++-------- pkg/kv/kvserver/consistency_queue_test.go | 2 +- .../kvserver/flow_control_integration_test.go | 4 +- .../intent_resolver_integration_test.go | 2 +- pkg/kv/kvserver/liveness/client_test.go | 2 +- pkg/kv/kvserver/node_liveness_test.go | 16 +- pkg/kv/kvserver/range_log_test.go | 14 +- pkg/kv/kvserver/replica_closedts_test.go | 4 +- pkg/kv/kvserver/replica_learner_test.go | 3 +- pkg/kv/kvserver/replica_probe_test.go | 4 +- pkg/kv/kvserver/replica_rangefeed_test.go | 30 +-- pkg/kv/kvserver/replicate_queue_test.go | 36 +-- pkg/kv/kvserver/replicate_test.go | 6 +- pkg/kv/kvserver/ts_maintenance_queue_test.go | 12 +- pkg/kv/txn_external_test.go | 2 +- pkg/roachpb/BUILD.bazel | 1 + pkg/roachpb/leaseinfo.go | 49 +++++ pkg/server/BUILD.bazel | 4 + pkg/server/admin.go | 33 +-- pkg/server/api_v2.go | 10 +- pkg/server/application_api/activity_test.go | 6 +- pkg/server/application_api/metrics_test.go | 7 +- pkg/server/application_api/sessions_test.go | 8 +- .../storage_inspection_test.go | 6 +- pkg/server/application_api/zcfg_test.go | 12 +- pkg/server/authserver/authentication_test.go | 3 +- pkg/server/connectivity_test.go | 2 +- pkg/server/decommission.go | 44 ++-- pkg/server/decommissioning/BUILD.bazel | 12 + pkg/server/decommissioning/decommissioning.go | 35 +++ pkg/server/diagnostics/update_checker_test.go | 2 +- pkg/server/grpc_server.go | 19 ++ pkg/server/node_test.go | 53 ++--- pkg/server/purge_auth_session_test.go | 12 +- pkg/server/server.go | 13 +- .../server_internal_executor_factory_test.go | 9 +- pkg/server/server_special_test.go | 138 ++++++++++++ pkg/server/server_systemlog_gc_test.go | 12 +- pkg/server/server_test.go | 153 ++----------- pkg/server/settings_cache_test.go | 23 +- pkg/server/span_stats_test.go | 6 +- pkg/server/storage_api/BUILD.bazel | 1 + pkg/server/storage_api/decommission_test.go | 10 +- pkg/server/storage_api/files_test.go | 4 +- pkg/server/storage_api/health_test.go | 103 +++++---- pkg/server/storage_api/logfiles_test.go | 12 +- pkg/server/storage_api/nodes_test.go | 5 +- pkg/server/storage_api/ranges_test.go | 11 +- pkg/server/testserver.go | 206 ++++++++++-------- pkg/server/user_test.go | 12 +- pkg/sql/authorization_test.go | 7 +- pkg/sql/catalog/lease/lease_test.go | 11 +- pkg/sql/drop_test.go | 5 +- pkg/sql/internal_test.go | 6 +- pkg/sql/pgwire/auth_test.go | 32 +-- pkg/sql/pgwire/conn_test.go | 65 +++--- pkg/sql/pgwire/main_test.go | 2 + pkg/sql/pgwire/pgtest_test.go | 2 +- pkg/sql/pgwire/pgwire_test.go | 30 ++- pkg/sql/physicalplan/span_resolver_test.go | 27 ++- .../sem/builtins/fingerprint_builtin_test.go | 9 +- pkg/sql/split_test.go | 3 +- pkg/sql/sqlnoccltest/partition_test.go | 5 +- pkg/sql/tests/split_test.go | 6 +- pkg/sql/unsplit_test.go | 3 +- pkg/sql/zone_config_test.go | 19 +- pkg/sql/zone_test.go | 13 +- pkg/testutils/lint/lint_test.go | 35 +++ pkg/testutils/serverutils/BUILD.bazel | 2 + pkg/testutils/serverutils/api.go | 134 +++++++++++- pkg/testutils/testcluster/testcluster.go | 86 ++++---- pkg/testutils/testcluster/testcluster_test.go | 20 +- pkg/ts/server_test.go | 32 ++- pkg/upgrade/upgrades/system_job_info_test.go | 2 +- 127 files changed, 1390 insertions(+), 1131 deletions(-) create mode 100644 pkg/roachpb/leaseinfo.go create mode 100644 pkg/server/decommissioning/BUILD.bazel create mode 100644 pkg/server/decommissioning/decommissioning.go create mode 100644 pkg/server/server_special_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3d0f4019f2d..b43c2d857e9 100644 --- a/.github/CODEOWNERS +++ 
b/.github/CODEOWNERS @@ -490,6 +490,7 @@ /pkg/roachpb/.gitattributes @cockroachdb/dev-inf #!/pkg/roachpb/BUILD.bazel @cockroachdb/kv-prs-noreview /pkg/roachpb/data* @cockroachdb/kv-prs +/pkg/roachpb/leaseinfo* @cockroachdb/kv-prs /pkg/roachpb/index* @cockroachdb/cluster-observability /pkg/roachpb/internal* @cockroachdb/kv-prs /pkg/roachpb/io-formats* @cockroachdb/disaster-recovery diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index 37cc89062a0..c5c76e97e16 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -1529,6 +1529,7 @@ GO_TARGETS = [ "//pkg/server/debug/replay:replay", "//pkg/server/debug:debug", "//pkg/server/debug:debug_test", + "//pkg/server/decommissioning:decommissioning", "//pkg/server/diagnostics/diagnosticspb:diagnosticspb", "//pkg/server/diagnostics:diagnostics", "//pkg/server/diagnostics:diagnostics_test", diff --git a/pkg/bench/BUILD.bazel b/pkg/bench/BUILD.bazel index ee2e1a1cb4d..c9a3a31c1d5 100644 --- a/pkg/bench/BUILD.bazel +++ b/pkg/bench/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "//pkg/ccl", "//pkg/multitenant/tenantcapabilities", "//pkg/roachpb", - "//pkg/server", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/skip", diff --git a/pkg/bench/foreachdb.go b/pkg/bench/foreachdb.go index 8c6144d6d3b..b9617ac8c24 100644 --- a/pkg/bench/foreachdb.go +++ b/pkg/bench/foreachdb.go @@ -26,7 +26,6 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/ccl" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -71,7 +70,7 @@ func benchmarkSharedProcessTenantCockroach(b *testing.B, f BenchmarkFn) { // Create our own test tenant with a known name. 
tenantName := "benchtenant" - _, tenantDB, err := s.(*server.TestServer).StartSharedProcessTenant(ctx, + _, tenantDB, err := s.TenantController().StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{ TenantName: roachpb.TenantName(tenantName), UseDatabase: "bench", diff --git a/pkg/ccl/backupccl/backup_tenant_test.go b/pkg/ccl/backupccl/backup_tenant_test.go index 35dd3bd3ce6..c727bdb0251 100644 --- a/pkg/ccl/backupccl/backup_tenant_test.go +++ b/pkg/ccl/backupccl/backup_tenant_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" _ "github.com/cockroachdb/cockroach/pkg/sql/importer" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -87,7 +86,7 @@ func TestBackupTenantImportingTable(t *testing.T) { t.Fatal(err) } - if err := tc.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := tc.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index c0f483dd9e8..f9776de5e9c 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -69,7 +69,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql" @@ -7150,7 +7149,7 @@ func TestBackupRestoreTenant(t *testing.T) { ) tenantID := roachpb.MustMakeTenantID(10) - if err := restoreTC.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } @@ -7242,7 +7241,7 @@ func TestBackupRestoreTenant(t *testing.T) { ) tenantID := roachpb.MustMakeTenantID(10) - if err := restoreTC.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } @@ -7316,7 +7315,7 @@ func TestBackupRestoreTenant(t *testing.T) { ) tenantID := roachpb.MustMakeTenantID(10) - if err := restoreTC.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } @@ -7334,7 +7333,7 @@ func TestBackupRestoreTenant(t *testing.T) { restoreTenant10.CheckQueryResults(t, `SHOW CLUSTER SETTING tenant_cost_model.write_payload_cost_per_mebibyte`, [][]string{{"456"}}) tenantID = roachpb.MustMakeTenantID(11) - if err := restoreTC.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } @@ -7355,7 +7354,7 @@ func TestBackupRestoreTenant(t *testing.T) { restoreDB.Exec(t, `RESTORE TENANT 11 FROM 'nodelocal://1/clusterwide' WITH virtual_cluster = '20', virtual_cluster_name = 'tenant-20'`) tenantID = roachpb.MustMakeTenantID(20) - if err := restoreTC.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := 
restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } @@ -7392,7 +7391,7 @@ func TestBackupRestoreTenant(t *testing.T) { restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/t10' AS OF SYSTEM TIME `+ts1) tenantID := roachpb.MustMakeTenantID(10) - if err := restoreTC.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } @@ -7418,7 +7417,7 @@ func TestBackupRestoreTenant(t *testing.T) { restoreDB.Exec(t, `RESTORE TENANT 20 FROM 'nodelocal://1/t20'`) tenantID := roachpb.MustMakeTenantID(20) - if err := restoreTC.Server(0).(*server.TestServer).WaitForTenantReadiness(ctx, tenantID); err != nil { + if err := restoreTC.Server(0).TenantController().WaitForTenantReadiness(ctx, tenantID); err != nil { t.Fatal(err) } diff --git a/pkg/ccl/changefeedccl/alter_changefeed_test.go b/pkg/ccl/changefeedccl/alter_changefeed_test.go index c8dd8e1ae6b..1ed63c647c2 100644 --- a/pkg/ccl/changefeedccl/alter_changefeed_test.go +++ b/pkg/ccl/changefeedccl/alter_changefeed_test.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" @@ -50,7 +49,9 @@ func TestAlterChangefeedAddTargetPrivileges(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ + ctx := context.Background() + + s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ DefaultTestTenant: base.TODOTestTenantDisabled, Knobs: base.TestingKnobs{ JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), @@ -66,8 +67,6 @@ func TestAlterChangefeedAddTargetPrivileges(t *testing.T) { }, }, }) - ctx := context.Background() - s := srv.(*server.TestServer) defer s.Stopper().Stop(ctx) rootDB := sqlutils.MakeSQLRunner(db) diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index dd2cd926aeb..35d36fa0fbe 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -1744,7 +1744,7 @@ func TestChangefeedLaggingSpanCheckpointing(t *testing.T) { defer stopServer() sqlDB := sqlutils.MakeSQLRunner(db) - knobs := s.(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs(). DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) @@ -2695,7 +2695,8 @@ func TestChangefeedCreateAuthorizationWithChangefeedPriv(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ + ctx := context.Background() + s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ DefaultTestTenant: base.TODOTestTenantDisabled, Knobs: base.TestingKnobs{ JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), @@ -2711,8 +2712,6 @@ func TestChangefeedCreateAuthorizationWithChangefeedPriv(t *testing.T) { }, }, }) - ctx := context.Background() - s := srv.(*server.TestServer) defer s.Stopper().Stop(ctx) rootDB := sqlutils.MakeSQLRunner(db) diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index 755161aa271..ee9a735c4e1 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -796,7 +796,7 @@ func makeSystemServerWithOptions( TestServer: TestServer{ DB: systemDB, Server: systemServer, - TestingKnobs: systemServer.(*server.TestServer).Cfg.TestingKnobs, + TestingKnobs: *systemServer.SystemLayer().TestingKnobs(), Codec: keys.SystemSQLCodec, }, SystemServer: systemServer, diff --git a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go index 0296bee083d..0d74975ce95 100644 --- a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go +++ b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go @@ -28,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/scheduledjobs/schedulebase" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -290,7 +289,8 @@ func TestCreateChangefeedScheduleChecksPermissionsDuringDryRun(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ + ctx := context.Background() + s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ DefaultTestTenant: base.TODOTestTenantDisabled, Knobs: base.TestingKnobs{ JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), @@ -306,8 +306,6 @@ func TestCreateChangefeedScheduleChecksPermissionsDuringDryRun(t *testing.T) { }, }, }) - ctx := context.Background() - s := srv.(*server.TestServer) defer s.Stopper().Stop(ctx) rootDB := sqlutils.MakeSQLRunner(db) rootDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`) diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go index 725ab67dc78..fac1d07a97a 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go @@ -803,7 +803,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { // Make a note of the follower reads metric on n3. We'll check that it was // incremented. var followerReadsCountBefore int64 - err := tc.Servers[2].Stores().VisitStores(func(s *kvserver.Store) error { + err := tc.Servers[2].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { followerReadsCountBefore = s.Metrics().FollowerReadsCount.Count() return nil }) @@ -820,7 +820,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { // Check that the follower read metric was incremented. 
var followerReadsCountAfter int64 - err = tc.Servers[2].Stores().VisitStores(func(s *kvserver.Store) error { + err = tc.Servers[2].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { followerReadsCountAfter = s.Metrics().FollowerReadsCount.Count() return nil }) @@ -1025,7 +1025,7 @@ func TestSecondaryTenantFollowerReadsRouting(t *testing.T) { getFollowerReadCounts := func() [numNodes]int64 { var counts [numNodes]int64 for i := range tc.Servers { - err := tc.Servers[i].Stores().VisitStores(func(s *kvserver.Store) error { + err := tc.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { counts[i] = s.Metrics().FollowerReadsCount.Count() return nil }) diff --git a/pkg/ccl/multiregionccl/BUILD.bazel b/pkg/ccl/multiregionccl/BUILD.bazel index b4ddb9ee699..f9b6e6cc0bb 100644 --- a/pkg/ccl/multiregionccl/BUILD.bazel +++ b/pkg/ccl/multiregionccl/BUILD.bazel @@ -66,6 +66,7 @@ go_test( "//pkg/server", "//pkg/server/serverpb", "//pkg/settings/cluster", + "//pkg/spanconfig", "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/catpb", diff --git a/pkg/ccl/multiregionccl/cold_start_latency_test.go b/pkg/ccl/multiregionccl/cold_start_latency_test.go index 21e0b99d5ec..413602b7326 100644 --- a/pkg/ccl/multiregionccl/cold_start_latency_test.go +++ b/pkg/ccl/multiregionccl/cold_start_latency_test.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils/regionlatency" @@ -288,7 +289,7 @@ SELECT checkpoint > extract(epoch from after) // Wait for the configs to be applied. 
testutils.SucceedsWithin(t, func() error { for _, server := range tc.Servers { - reporter := server.Server.SpanConfigReporter() + reporter := server.SpanConfigReporter().(spanconfig.Reporter) report, err := reporter.SpanConfigConformance(ctx, []roachpb.Span{ {Key: keys.TableDataMin, EndKey: keys.TenantTableDataMax}, }) diff --git a/pkg/ccl/multiregionccl/region_util_test.go b/pkg/ccl/multiregionccl/region_util_test.go index dafae288f9f..113b37e9f36 100644 --- a/pkg/ccl/multiregionccl/region_util_test.go +++ b/pkg/ccl/multiregionccl/region_util_test.go @@ -41,7 +41,7 @@ func TestGetLocalityRegionEnumPhysicalRepresentation(t *testing.T) { tDB := sqlutils.MakeSQLRunner(sqlDB) tDB.Exec(t, `CREATE DATABASE foo PRIMARY REGION "us-east1" REGIONS "us-east1", "us-east2", "us-east3"`) - s0 := tc.ServerTyped(0) + s0 := tc.Server(0) idb := s0.InternalDB().(descs.DB) dbID := descpb.ID(sqlutils.QueryDatabaseID(t, sqlDB, "foo")) diff --git a/pkg/ccl/partitionccl/zone_test.go b/pkg/ccl/partitionccl/zone_test.go index 067599bfd00..213199c1fda 100644 --- a/pkg/ccl/partitionccl/zone_test.go +++ b/pkg/ccl/partitionccl/zone_test.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -55,9 +54,9 @@ func TestValidIndexPartitionSetShowZones(t *testing.T) { PARTITION p1 VALUES IN (DEFAULT) )`) - yamlDefault := fmt.Sprintf("gc: {ttlseconds: %d}", s.(*server.TestServer).Cfg.DefaultZoneConfig.GC.TTLSeconds) + yamlDefault := fmt.Sprintf("gc: {ttlseconds: %d}", s.DefaultZoneConfig().GC.TTLSeconds) yamlOverride := "gc: {ttlseconds: 42}" - zoneOverride := s.(*server.TestServer).Cfg.DefaultZoneConfig + zoneOverride := s.DefaultZoneConfig() zoneOverride.GC = &zonepb.GCPolicy{TTLSeconds: 42} partialZoneOverride := *zonepb.NewZoneConfig() partialZoneOverride.GC = &zonepb.GCPolicy{TTLSeconds: 42} @@ -67,7 +66,7 @@ func TestValidIndexPartitionSetShowZones(t *testing.T) { defaultRow := sqlutils.ZoneRow{ ID: keys.RootNamespaceID, - Config: s.(*server.TestServer).Cfg.DefaultZoneConfig, + Config: s.DefaultZoneConfig(), } defaultOverrideRow := sqlutils.ZoneRow{ ID: keys.RootNamespaceID, @@ -403,11 +402,11 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT);`); err != nil { defaultRow := sqlutils.ZoneRow{ ID: keys.RootNamespaceID, - Config: s.(*server.TestServer).Cfg.DefaultZoneConfig, + Config: s.DefaultZoneConfig(), } tableID := sqlutils.QueryTableID(t, sqlDB, "t", "public", "test") - zoneOverride := s.(*server.TestServer).Cfg.DefaultZoneConfig + zoneOverride := s.DefaultZoneConfig() zoneOverride.GC = &zonepb.GCPolicy{TTLSeconds: 42} overrideRow := sqlutils.ZoneRow{ diff --git a/pkg/ccl/serverccl/admin_test.go b/pkg/ccl/serverccl/admin_test.go index 07a90515a76..ec929dcb553 100644 --- a/pkg/ccl/serverccl/admin_test.go +++ b/pkg/ccl/serverccl/admin_test.go @@ -16,7 +16,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -177,7 +176,7 @@ func TestListTenants(t *testing.T) { }) defer s.Stopper().Stop(ctx) - _, _, err := s.(*server.TestServer).StartSharedProcessTenant(ctx, + _, _, err := 
s.TenantController().StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{ TenantName: "test", }) diff --git a/pkg/ccl/serverccl/server_controller_test.go b/pkg/ccl/serverccl/server_controller_test.go index 9e1c03066e1..cc994ed8a39 100644 --- a/pkg/ccl/serverccl/server_controller_test.go +++ b/pkg/ccl/serverccl/server_controller_test.go @@ -184,7 +184,7 @@ func TestServerControllerHTTP(t *testing.T) { t.Logf("waking up a test tenant") // Create our own test tenant with a known name. - _, _, err = s.(*server.TestServer).StartSharedProcessTenant(ctx, + _, _, err = s.TenantController().StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{ TenantName: "hello", }) @@ -643,7 +643,7 @@ func TestServiceShutdownUsesGracefulDrain(t *testing.T) { drainCh := make(chan struct{}) // Start a shared process server. - _, _, err := s.(*server.TestServer).StartSharedProcessTenant(ctx, + _, _, err := s.TenantController().StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{ TenantName: "hello", Knobs: base.TestingKnobs{ diff --git a/pkg/ccl/serverccl/tenant_vars_test.go b/pkg/ccl/serverccl/tenant_vars_test.go index da99fe944d7..78381e0af46 100644 --- a/pkg/ccl/serverccl/tenant_vars_test.go +++ b/pkg/ccl/serverccl/tenant_vars_test.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" _ "github.com/cockroachdb/cockroach/pkg/ccl/kvccl" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -48,7 +47,7 @@ func TestTenantVars(t *testing.T) { }) } else { var err error - tenant, _, err = srv.(*server.TestServer).StartSharedProcessTenant(ctx, + tenant, _, err = srv.TenantController().StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{ TenantName: roachpb.TenantName("test"), TenantID: roachpb.MustMakeTenantID(20), diff --git a/pkg/ccl/sqlproxyccl/proxy_handler_test.go b/pkg/ccl/sqlproxyccl/proxy_handler_test.go index 30537a961b6..a52fc5cadff 100644 --- a/pkg/ccl/sqlproxyccl/proxy_handler_test.go +++ b/pkg/ccl/sqlproxyccl/proxy_handler_test.go @@ -139,7 +139,7 @@ func TestProxyProtocol(t *testing.T) { sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - ts := sql.(*server.TestServer).ApplicationLayer() + ts := sql.ApplicationLayer() ts.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) pgs := ts.PGServer().(*pgwire.Server) pgs.TestingEnableAuthLogging() @@ -249,7 +249,7 @@ func TestPrivateEndpointsACL(t *testing.T) { sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sql.Stopper().Stop(ctx) - ts := sql.(*server.TestServer).ApplicationLayer() + ts := sql.ApplicationLayer() ts.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) // Create a default user. 
@@ -691,7 +691,7 @@ func TestProxyAgainstSecureCRDB(t *testing.T) { sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sql.Stopper().Stop(ctx) - ts := sql.(*server.TestServer).ApplicationLayer() + ts := sql.ApplicationLayer() ts.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) pgs := ts.PGServer().(*pgwire.Server) pgs.TestingEnableAuthLogging() @@ -888,7 +888,7 @@ func TestProxyTLSClose(t *testing.T) { sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sql.Stopper().Stop(ctx) - ts := sql.(*server.TestServer).ApplicationLayer() + ts := sql.ApplicationLayer() ts.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) pgs := ts.PGServer().(*pgwire.Server) pgs.TestingEnableAuthLogging() @@ -939,7 +939,7 @@ func TestProxyModifyRequestParams(t *testing.T) { sql, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sql.Stopper().Stop(ctx) - ts := sql.(*server.TestServer).ApplicationLayer() + ts := sql.ApplicationLayer() ts.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) pgs := ts.PGServer().(*pgwire.Server) pgs.TestingEnableAuthLogging() @@ -997,7 +997,7 @@ func TestInsecureProxy(t *testing.T) { sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sql.Stopper().Stop(ctx) - ts := sql.(*server.TestServer).ApplicationLayer() + ts := sql.ApplicationLayer() ts.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) pgs := ts.PGServer().(*pgwire.Server) pgs.TestingEnableAuthLogging() @@ -1172,7 +1172,7 @@ func TestDenylistUpdate(t *testing.T) { sql, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sql.Stopper().Stop(ctx) - ts := sql.(*server.TestServer).ApplicationLayer() + ts := sql.ApplicationLayer() ts. PGPreServer().(*pgwire.PreServeConnHandler). 
TestingSetTrustClientProvidedRemoteAddr(true) diff --git a/pkg/ccl/testccl/authccl/auth_test.go b/pkg/ccl/testccl/authccl/auth_test.go index 0a821dbda35..c146e3a0e78 100644 --- a/pkg/ccl/testccl/authccl/auth_test.go +++ b/pkg/ccl/testccl/authccl/auth_test.go @@ -27,7 +27,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/jwtauthccl" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/pgwire" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -418,7 +417,7 @@ func TestClientAddrOverride(t *testing.T) { s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) ctx := context.Background() defer s.Stopper().Stop(ctx) - ts := s.(*server.TestServer).ApplicationLayer() + ts := s.ApplicationLayer() pgURL, cleanupFunc := sqlutils.PGUrl( t, ts.AdvSQLAddr(), "testClientAddrOverride" /* prefix */, url.User(username.TestUser), diff --git a/pkg/cli/clierror/syntax_error_test.go b/pkg/cli/clierror/syntax_error_test.go index 1176dc8a963..766ce1a6363 100644 --- a/pkg/cli/clierror/syntax_error_test.go +++ b/pkg/cli/clierror/syntax_error_test.go @@ -34,7 +34,7 @@ func TestIsSQLSyntaxError(t *testing.T) { c := cli.NewCLITest(p) defer c.Cleanup() - url, cleanup := sqlutils.PGUrl(t, c.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) + url, cleanup := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() var sqlConnCtx clisqlclient.Context diff --git a/pkg/cli/clisqlclient/conn_test.go b/pkg/cli/clisqlclient/conn_test.go index a3910940b81..cd11fc124ee 100644 --- a/pkg/cli/clisqlclient/conn_test.go +++ b/pkg/cli/clisqlclient/conn_test.go @@ -38,7 +38,7 @@ func TestConnRecover(t *testing.T) { defer c.Cleanup() ctx := context.Background() - url, cleanup := sqlutils.PGUrl(t, c.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) + url, cleanup := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() conn := makeSQLConn(url.String()) @@ -108,7 +108,7 @@ func simulateServerRestart( t *testing.T, c *cli.TestCLI, p cli.TestCLIParams, conn clisqlclient.Conn, ) func() { c.RestartServer(p) - url2, cleanup2 := sqlutils.PGUrl(t, c.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) + url2, cleanup2 := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) conn.SetURL(url2.String()) return cleanup2 } @@ -121,7 +121,7 @@ func TestTransactionRetry(t *testing.T) { defer c.Cleanup() ctx := context.Background() - url, cleanup := sqlutils.PGUrl(t, c.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) + url, cleanup := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() conn := makeSQLConn(url.String()) diff --git a/pkg/cli/clisqlexec/run_query_test.go b/pkg/cli/clisqlexec/run_query_test.go index 0d53c892494..c9dafa78456 100644 --- a/pkg/cli/clisqlexec/run_query_test.go +++ b/pkg/cli/clisqlexec/run_query_test.go @@ -49,7 +49,7 @@ func TestRunQuery(t *testing.T) { c := cli.NewCLITest(cli.TestCLIParams{T: t}) defer c.Cleanup() - url, cleanup := sqlutils.PGUrl(t, c.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) + url, cleanup := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() conn := makeSQLConn(url.String()) @@ -179,7 +179,7 @@ func TestUtfName(t *testing.T) { c := cli.NewCLITest(cli.TestCLIParams{T: t}) defer c.Cleanup() - 
url, cleanup := sqlutils.PGUrl(t, c.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) + url, cleanup := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() conn := makeSQLConn(url.String()) diff --git a/pkg/cli/clisqlshell/describe_test.go b/pkg/cli/clisqlshell/describe_test.go index 935743bc1ea..7e03c3364a2 100644 --- a/pkg/cli/clisqlshell/describe_test.go +++ b/pkg/cli/clisqlshell/describe_test.go @@ -38,7 +38,7 @@ func TestDescribe(t *testing.T) { c := cli.NewCLITest(cli.TestCLIParams{T: t}) defer c.Cleanup() - db := c.TestServer.SQLConn(t, "defaultdb") + db := c.Server.SQLConn(t, "defaultdb") var commonArgs []string diff --git a/pkg/cli/clisqlshell/sql_test.go b/pkg/cli/clisqlshell/sql_test.go index 296fbc3800f..c9e06e0c72f 100644 --- a/pkg/cli/clisqlshell/sql_test.go +++ b/pkg/cli/clisqlshell/sql_test.go @@ -359,7 +359,7 @@ func Example_sql_lex() { var sqlConnCtx clisqlclient.Context conn := sqlConnCtx.MakeSQLConn(io.Discard, io.Discard, fmt.Sprintf("postgres://%s@%s/?sslmode=disable", - username.RootUser, c.AdvSQLAddr())) + username.RootUser, c.Server.AdvSQLAddr())) defer func() { if err := conn.Close(); err != nil { fmt.Printf("error closing connection: %v\n", err) diff --git a/pkg/cli/debug_job_trace_test.go b/pkg/cli/debug_job_trace_test.go index 5befccef89a..9055e10ab5e 100644 --- a/pkg/cli/debug_job_trace_test.go +++ b/pkg/cli/debug_job_trace_test.go @@ -82,7 +82,7 @@ func TestDebugJobTrace(t *testing.T) { defer c.Cleanup() c.omitArgs = true - registry := c.TestServer.JobRegistry().(*jobs.Registry) + registry := c.Server.JobRegistry().(*jobs.Registry) jobCtx, cancel := context.WithCancel(ctx) defer cancel() @@ -107,7 +107,7 @@ func TestDebugJobTrace(t *testing.T) { // to inject our traceSpanResumer. 
var job *jobs.StartableJob id := registry.MakeJobID() - require.NoError(t, c.TestServer.InternalDB().(isql.DB).Txn(ctx, func( + require.NoError(t, c.Server.InternalDB().(isql.DB).Txn(ctx, func( ctx context.Context, txn isql.Txn, ) (err error) { err = registry.CreateStartableJobWithTxn(ctx, &job, id, txn, jobs.Record{ @@ -124,7 +124,7 @@ func TestDebugJobTrace(t *testing.T) { <-recordedSpanCh args := []string{strconv.Itoa(int(id))} - pgURL, cleanup := sqlutils.PGUrl(t, c.TestServer.AdvSQLAddr(), + pgURL, cleanup := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), "TestDebugJobTrace", url.User(username.RootUser)) defer cleanup() diff --git a/pkg/cli/debug_recover_loss_of_quorum_test.go b/pkg/cli/debug_recover_loss_of_quorum_test.go index 1d7a638a3e1..f5492ec2de8 100644 --- a/pkg/cli/debug_recover_loss_of_quorum_test.go +++ b/pkg/cli/debug_recover_loss_of_quorum_test.go @@ -265,7 +265,7 @@ func TestLossOfQuorumRecovery(t *testing.T) { "Failed to decommission removed nodes") for i := 0; i < len(tcAfter.Servers); i++ { - require.NoError(t, tcAfter.Servers[i].Stores().VisitStores(func(store *kvserver.Store) error { + require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(true) return nil }), "Failed to activate replication queue") @@ -275,7 +275,7 @@ func TestLossOfQuorumRecovery(t *testing.T) { require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication") for i := 0; i < len(tcAfter.Servers); i++ { - require.NoError(t, tcAfter.Servers[i].Stores().VisitStores(func(store *kvserver.Store) error { + require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceConsistencyQueueProcess() }), "Failed to force replicas to consistency queue") } diff --git a/pkg/cli/democluster/BUILD.bazel b/pkg/cli/democluster/BUILD.bazel index f6fc10cf352..5f909a1e470 100644 --- a/pkg/cli/democluster/BUILD.bazel +++ b/pkg/cli/democluster/BUILD.bazel @@ -36,7 +36,7 @@ go_library( "//pkg/server/status", "//pkg/sql", "//pkg/sql/catalog/catalogkeys", - "//pkg/sql/distsql", + "//pkg/sql/isql", "//pkg/sql/sem/catconstants", "//pkg/testutils/serverutils", "//pkg/testutils/serverutils/regionlatency", diff --git a/pkg/cli/democluster/demo_cluster.go b/pkg/cli/democluster/demo_cluster.go index 64f2a30227e..597c2a51f4c 100644 --- a/pkg/cli/democluster/demo_cluster.go +++ b/pkg/cli/democluster/demo_cluster.go @@ -41,7 +41,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/status" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" - "github.com/cockroachdb/cockroach/pkg/sql/distsql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils/regionlatency" @@ -67,7 +67,7 @@ import ( ) type serverEntry struct { - *server.TestServer + serverutils.TestServerInterface adminClient serverpb.AdminClient nodeID roachpb.NodeID } @@ -79,7 +79,7 @@ type transientCluster struct { demoDir string useSockets bool stopper *stop.Stopper - firstServer *server.TestServer + firstServer serverutils.TestServerInterface servers []serverEntry tenantServers []serverutils.ApplicationLayerInterface defaultDB string @@ -403,8 +403,10 @@ func (c *transientCluster) Start(ctx context.Context) (err error) { for i := 0; i < c.demoCtx.NumNodes; i++ { 
createTenant := i == 0 - latencyMap := c.servers[i].Cfg.TestingKnobs.Server.(*server.TestingKnobs). - ContextTestingKnobs.InjectedLatencyOracle + var latencyMap rpc.InjectedLatencyOracle + if knobs := c.servers[i].TestingKnobs().Server; knobs != nil { + latencyMap = knobs.(*server.TestingKnobs).ContextTestingKnobs.InjectedLatencyOracle + } c.infoLog(ctx, "starting tenant node %d", i) var ts serverutils.ApplicationLayerInterface @@ -481,7 +483,7 @@ func (c *transientCluster) Start(ctx context.Context) (err error) { // admin user. c.infoLog(ctx, "running initial SQL for demo cluster") // Propagate the server log tags to the operations below, to include node ID etc. - srv := c.firstServer.Server + srv := c.firstServer ctx = srv.AnnotateCtx(ctx) if err := srv.RunInitialSQL(ctx, c.demoCtx.NumNodes < 3, demoUsername, demoPassword); err != nil { @@ -499,7 +501,7 @@ func (c *transientCluster) Start(ctx context.Context) (err error) { // Also create the user/password for the secondary tenant. ts := c.tenantServers[0] tctx := ts.AnnotateCtx(ctx) - ieTenant := ts.DistSQLServer().(*distsql.ServerImpl).ServerConfig.DB.Executor() + ieTenant := ts.InternalExecutor().(isql.Executor) _, err = ieTenant.Exec(tctx, "tenant-password", nil, fmt.Sprintf("CREATE USER %s WITH PASSWORD '%s'", demoUsername, demoPassword)) if err != nil { @@ -513,7 +515,7 @@ func (c *transientCluster) Start(ctx context.Context) (err error) { if c.demoCtx.Multitenant && !c.demoCtx.DisableServerController { // Select the default tenant. - ie := c.firstServer.DistSQLServer().(*distsql.ServerImpl).ServerConfig.DB.Executor() + ie := c.firstServer.InternalExecutor().(isql.Executor) // Choose the tenant to use when no tenant is specified on a // connection or web URL. if _, err := ie.Exec(ctx, "default-tenant", nil, @@ -631,20 +633,20 @@ func (c *transientCluster) createAndAddNode( // Create the server instance. This also registers the in-memory store // into the sticky engine registry. - s, err := server.TestServerFactory.New(args) + srv, err := server.TestServerFactory.New(args) if err != nil { return nil, err } - serv := s.(*server.TestServer) + s := srv.(serverutils.TestServerInterface) // Ensure that this server gets stopped when the top level demo // stopper instructs the cluster to stop. - c.stopper.AddCloser(stop.CloserFn(serv.Stop)) + c.stopper.AddCloser(stop.CloserFn(func() { s.Stop(context.Background()) })) if idx == 0 { // Remember the first server for later use by other APIs on // transientCluster. - c.firstServer = serv + c.firstServer = s // The first node connects its Settings instance to the `log` // package for crash reporting. // @@ -655,12 +657,12 @@ func (c *transientCluster) createAndAddNode( // // TODO(knz): re-connect the `log` package every time the first // node is restarted and gets a new `Settings` instance. - logcrash.SetGlobalSettings(&serv.ClusterSettings().SV) + logcrash.SetGlobalSettings(&s.ClusterSettings().SV) } // Remember this server for the stop/restart primitives in the SQL // shell. 
- c.servers = append(c.servers, serverEntry{TestServer: serv, nodeID: serv.NodeID()}) + c.servers = append(c.servers, serverEntry{TestServerInterface: s, nodeID: s.NodeID()}) return rpcAddrReadyCh, nil } @@ -673,7 +675,7 @@ func (c *transientCluster) startNodeAsync( return errors.AssertionFailedf("programming error: server %d not created yet", idx) } - serv := c.servers[idx] + s := c.servers[idx] tag := fmt.Sprintf("start-n%d", idx+1) return c.stopper.RunAsyncTask(ctx, tag, func(ctx context.Context) { // We call Start() with context.Background() because we don't want the @@ -684,7 +686,7 @@ func (c *transientCluster) startNodeAsync( ctx = logtags.WithTags(context.Background(), logtags.FromContext(ctx)) ctx = logtags.AddTag(ctx, tag, nil) - err := serv.Start(ctx) + err := s.Start(ctx) if err != nil { c.warnLog(ctx, "server %d failed to start: %v", idx, err) select { @@ -692,7 +694,7 @@ func (c *transientCluster) startNodeAsync( // Don't block if we are shutting down. case <-ctx.Done(): - case <-serv.Stopper().ShouldQuiesce(): + case <-s.Stopper().ShouldQuiesce(): case <-c.stopper.ShouldQuiesce(): case <-timeoutCh: } @@ -1005,7 +1007,7 @@ func (c *transientCluster) DrainAndShutdown(ctx context.Context, nodeID int32) e if serverIdx == -1 { return errors.Errorf("node %d does not exist", nodeID) } - if c.servers[serverIdx].TestServer == nil { + if c.servers[serverIdx].TestServerInterface == nil { return errors.Errorf("node %d is already shut down", nodeID) } // This is possible if we re-assign c.s and make the other nodes to the new @@ -1020,7 +1022,7 @@ func (c *transientCluster) DrainAndShutdown(ctx context.Context, nodeID int32) e if err := c.drainAndShutdown(ctx, c.servers[serverIdx].adminClient); err != nil { return err } - c.servers[serverIdx].TestServer = nil + c.servers[serverIdx].TestServerInterface = nil c.servers[serverIdx].adminClient = nil return nil } @@ -1133,7 +1135,7 @@ func (c *transientCluster) RestartNode(ctx context.Context, nodeID int32) error if serverIdx == -1 { return errors.Errorf("node %d does not exist", nodeID) } - if c.servers[serverIdx].TestServer != nil { + if c.servers[serverIdx].TestServerInterface != nil { return errors.Errorf("node %d is already running", nodeID) } @@ -1159,19 +1161,19 @@ func (c *transientCluster) startServerInternal( serverIdx, c.firstServer.AdvRPCAddr(), c.demoDir, c.stickyEngineRegistry) - s, err := server.TestServerFactory.New(args) + srv, err := server.TestServerFactory.New(args) if err != nil { return 0, err } - serv := s.(*server.TestServer) + s := srv.(serverutils.TestServerInterface) // We want to only return after the server is ready. 
readyCh := make(chan struct{}) - serv.Cfg.ReadyFn = func(bool) { + s.SetReadyFn(func(bool) { close(readyCh) - } + }) - if err := serv.Start(ctx); err != nil { + if err := s.Start(ctx); err != nil { return 0, err } @@ -1182,19 +1184,19 @@ func (c *transientCluster) startServerInternal( return 0, errors.Newf("could not initialize server %d in time", serverIdx) } - c.stopper.AddCloser(stop.CloserFn(serv.Stop)) - nodeID := serv.NodeID() + c.stopper.AddCloser(stop.CloserFn(func() { s.Stop(context.Background()) })) + nodeID := s.NodeID() - conn, err := serv.RPCClientConnE(username.RootUserName()) + conn, err := s.RPCClientConnE(username.RootUserName()) if err != nil { - serv.Stopper().Stop(ctx) + s.Stopper().Stop(ctx) return 0, err } c.servers[serverIdx] = serverEntry{ - TestServer: serv, - adminClient: serverpb.NewAdminClient(conn), - nodeID: nodeID, + TestServerInterface: s, + adminClient: serverpb.NewAdminClient(conn), + nodeID: nodeID, } return int32(nodeID), nil } @@ -1887,7 +1889,7 @@ func (c *transientCluster) NumServers() int { } func (c *transientCluster) Server(i int) serverutils.TestServerInterface { - return c.servers[i].TestServer + return c.servers[i].TestServerInterface } func (c *transientCluster) GetLocality(nodeID int32) string { @@ -1898,7 +1900,7 @@ func (c *transientCluster) ListDemoNodes(w, ew io.Writer, justOne, verbose bool) numNodesLive := 0 // First, list system tenant nodes. for i, s := range c.servers { - if s.TestServer == nil { + if s.TestServerInterface == nil { continue } numNodesLive++ @@ -1918,7 +1920,7 @@ func (c *transientCluster) ListDemoNodes(w, ew io.Writer, justOne, verbose bool) // When using the server controller, we have a single web UI // URL for both tenants. The demologin link does an // auto-login for both. - uiURL := c.addDemoLoginToURL(s.Cfg.AdminURL(), false /* includeTenantName */) + uiURL := c.addDemoLoginToURL(s.AdminURL().URL, false /* includeTenantName */) fmt.Fprintln(w, " (webui) ", uiURL) } if verbose { @@ -1950,7 +1952,7 @@ func (c *transientCluster) ListDemoNodes(w, ew io.Writer, justOne, verbose bool) } if !c.demoCtx.Multitenant || verbose { // Connection parameters for the system tenant follow. 
- uiURL := s.Cfg.AdminURL() + uiURL := s.AdminURL().URL if q := uiURL.Query(); c.demoCtx.Multitenant && !c.demoCtx.DisableServerController && !q.Has(server.ClusterNameParamInQueryURL) { q.Add(server.ClusterNameParamInQueryURL, catconstants.SystemTenantName) uiURL.RawQuery = q.Encode() diff --git a/pkg/cli/democluster/session_persistence.go b/pkg/cli/democluster/session_persistence.go index 7634ed97621..c117303c3e3 100644 --- a/pkg/cli/democluster/session_persistence.go +++ b/pkg/cli/democluster/session_persistence.go @@ -96,7 +96,7 @@ func (c *transientCluster) doPersistence( } } - if len(c.servers) > 0 && c.servers[0].TestServer != nil { + if len(c.servers) > 0 && c.servers[0].TestServerInterface != nil { sqlAddr := c.servers[0].AdvSQLAddr() host, port, _ := addr.SplitHostPort(sqlAddr, "") u.WithNet(pgurl.NetTCP(host, port)) diff --git a/pkg/cli/import_test.go b/pkg/cli/import_test.go index 7d5908d26bc..fa9dbe2b80b 100644 --- a/pkg/cli/import_test.go +++ b/pkg/cli/import_test.go @@ -60,7 +60,7 @@ func runImportCLICommand( data, err := os.ReadFile(dumpFilePath) require.NoError(t, err) userfileURI := constructUserfileDestinationURI(dumpFilePath, "", username.RootUserName()) - checkUserFileContent(ctx, t, c.ExecutorConfig(), username.RootUserName(), userfileURI, data) + checkUserFileContent(ctx, t, c.Server.ExecutorConfig(), username.RootUserName(), userfileURI, data) select { case knobs.pauseAfterUpload <- struct{}{}: case err := <-errCh: @@ -73,7 +73,7 @@ func runImportCLICommand( // Check that the dump file has been cleaned up after the import CLI command // has completed. - store, err := c.ExecutorConfig().(sql.ExecutorConfig).DistSQLSrv.ExternalStorageFromURI(ctx, + store, err := c.Server.ExecutorConfig().(sql.ExecutorConfig).DistSQLSrv.ExternalStorageFromURI(ctx, userfileURI, username.RootUserName()) require.NoError(t, err) _, _, err = store.ReadFile(ctx, "", cloud.ReadOptions{NoFileSize: true}) diff --git a/pkg/cli/node_test.go b/pkg/cli/node_test.go index faa8ae59466..a8918ff309f 100644 --- a/pkg/cli/node_test.go +++ b/pkg/cli/node_test.go @@ -34,7 +34,7 @@ func Example_node() { defer c.Cleanup() // Refresh time series data, which is required to retrieve stats. - if err := c.WriteSummaries(); err != nil { + if err := c.Server.WriteSummaries(); err != nil { log.Fatalf(context.Background(), "Couldn't write stats summaries: %s", err) } @@ -71,7 +71,7 @@ func TestNodeStatus(t *testing.T) { defer c.Cleanup() // Refresh time series data, which is required to retrieve stats. 
- if err := c.WriteSummaries(); err != nil { + if err := c.Server.WriteSummaries(); err != nil { t.Fatalf("couldn't write stats summaries: %s", err) } @@ -161,18 +161,18 @@ func checkNodeStatus(t *testing.T, c TestCLI, output string, start time.Time) { t.Fatalf("%s", err) } - nodeID := c.NodeID() + nodeID := c.Server.NodeID() nodeIDStr := strconv.FormatInt(int64(nodeID), 10) if a, e := fields[0], nodeIDStr; a != e { t.Errorf("node id (%s) != expected (%s)", a, e) } - nodeAddr := c.AdvRPCAddr() + nodeAddr := c.Server.AdvRPCAddr() if a, e := fields[1], nodeAddr; a != e { t.Errorf("node address (%s) != expected (%s)", a, e) } - nodeSQLAddr := c.AdvSQLAddr() + nodeSQLAddr := c.Server.AdvSQLAddr() if a, e := fields[2], nodeSQLAddr; a != e { t.Errorf("node SQL address (%s) != expected (%s)", a, e) } diff --git a/pkg/cli/nodelocal_test.go b/pkg/cli/nodelocal_test.go index b597a2c9c3b..54c6600bdfc 100644 --- a/pkg/cli/nodelocal_test.go +++ b/pkg/cli/nodelocal_test.go @@ -115,7 +115,7 @@ func TestNodeLocalFileUpload(t *testing.T) { if err != nil { t.Fatal(err) } - writtenContent, err := os.ReadFile(filepath.Join(c.Cfg.Settings.ExternalIODir, destination)) + writtenContent, err := os.ReadFile(filepath.Join(c.Server.ClusterSettings().ExternalIODir, destination)) if err != nil { t.Fatal(err) } diff --git a/pkg/cli/testutils.go b/pkg/cli/testutils.go index 07e89cebc33..3c15b572dc9 100644 --- a/pkg/cli/testutils.go +++ b/pkg/cli/testutils.go @@ -33,7 +33,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/certnames" "github.com/cockroachdb/cockroach/pkg/security/securitytest" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -50,7 +49,10 @@ func TestingReset() { // TestCLI wraps a test server and is used by tests to make assertions about the output of CLI commands. type TestCLI struct { - *server.TestServer + // Insecure is a copy of the insecure mode parameter. 
+ Insecure bool + + Server serverutils.TestServerInterface tenant serverutils.ApplicationLayerInterface certsDir string cleanupFunc func() error @@ -126,7 +128,7 @@ func NewCLITest(params TestCLIParams) TestCLI { } func newCLITestWithArgs(params TestCLIParams, argsFn func(args *base.TestServerArgs)) TestCLI { - c := TestCLI{t: params.T} + c := TestCLI{t: params.T, Insecure: params.Insecure} certsDir, err := os.MkdirTemp("", "cli-test") if err != nil { @@ -169,10 +171,10 @@ func newCLITestWithArgs(params TestCLIParams, argsFn func(args *base.TestServerA if err != nil { c.fail(err) } - c.TestServer = s.(*server.TestServer) + c.Server = s - log.Infof(context.Background(), "server started at %s", c.AdvRPCAddr()) - log.Infof(context.Background(), "SQL listener at %s", c.AdvSQLAddr()) + log.Infof(context.Background(), "server started at %s", c.Server.AdvRPCAddr()) + log.Infof(context.Background(), "SQL listener at %s", c.Server.AdvSQLAddr()) } if params.TenantArgs != nil && params.SharedProcessTenantArgs != nil { @@ -180,23 +182,23 @@ func newCLITestWithArgs(params TestCLIParams, argsFn func(args *base.TestServerA } if params.TenantArgs != nil || params.SharedProcessTenantArgs != nil { - if c.TestServer == nil { + if c.Server == nil { c.fail(errors.AssertionFailedf("multitenant mode for CLI requires a DB server, try setting `NoServer` argument to false")) } } if params.TenantArgs != nil { - if c.Insecure() { + if params.Insecure { params.TenantArgs.ForceInsecure = true } - c.tenant, err = c.TestServer.StartTenant(context.Background(), *params.TenantArgs) + c.tenant, err = c.Server.StartTenant(context.Background(), *params.TenantArgs) if err != nil { c.fail(err) } } if params.SharedProcessTenantArgs != nil { - c.tenant, _, err = c.TestServer.StartSharedProcessTenant(context.Background(), *params.SharedProcessTenantArgs) + c.tenant, _, err = c.Server.StartSharedProcessTenant(context.Background(), *params.SharedProcessTenantArgs) if err != nil { c.fail(err) } @@ -228,10 +230,10 @@ func setCLIDefaultsForTests() { // stopServer stops the test server. 
func (c *TestCLI) stopServer() { - if c.TestServer != nil { + if c.Server != nil { log.Infof(context.Background(), "stopping server at %s / %s", - c.AdvRPCAddr(), c.AdvSQLAddr()) - c.Stopper().Stop(context.Background()) + c.Server.AdvRPCAddr(), c.Server.AdvSQLAddr()) + c.Server.Stopper().Stop(context.Background()) } } @@ -248,14 +250,15 @@ func (c *TestCLI) RestartServer(params TestCLIParams) { if err != nil { c.fail(err) } - c.TestServer = s.(*server.TestServer) + c.Insecure = params.Insecure + c.Server = s log.Infof(context.Background(), "restarted server at %s / %s", - c.AdvRPCAddr(), c.AdvSQLAddr()) + c.Server.AdvRPCAddr(), c.Server.AdvSQLAddr()) if params.TenantArgs != nil { - if c.Insecure() { + if c.Insecure { params.TenantArgs.ForceInsecure = true } - c.tenant, _ = serverutils.StartTenant(c.t, c.TestServer, *params.TenantArgs) + c.tenant, _ = serverutils.StartTenant(c.t, c.Server, *params.TenantArgs) log.Infof(context.Background(), "restarted tenant SQL only server at %s", c.tenant.SQLAddr()) } } @@ -363,16 +366,16 @@ func isSQLCommand(args []string) (bool, error) { func (c TestCLI) getRPCAddr() string { if c.tenant != nil && !c.useSystemTenant { - return c.tenant.RPCAddr() + return c.tenant.AdvRPCAddr() } - return c.AdvRPCAddr() + return c.Server.AdvRPCAddr() } func (c TestCLI) getSQLAddr() string { if c.tenant != nil { - return c.tenant.SQLAddr() + return c.tenant.AdvSQLAddr() } - return c.AdvSQLAddr() + return c.Server.AdvSQLAddr() } // RunWithArgs add args according to TestCLI cfg. @@ -381,7 +384,7 @@ func (c TestCLI) RunWithArgs(origArgs []string) { if err := func() error { args := append([]string(nil), origArgs[:1]...) - if c.TestServer != nil { + if c.Server != nil { addr := c.getRPCAddr() if isSQL, err := isSQLCommand(origArgs); err != nil { return err @@ -393,7 +396,7 @@ func (c TestCLI) RunWithArgs(origArgs []string) { return err } args = append(args, fmt.Sprintf("--host=%s", net.JoinHostPort(h, p))) - if c.Cfg.Insecure { + if c.Insecure { args = append(args, "--insecure=true") } else { args = append(args, "--insecure=false") @@ -445,7 +448,7 @@ func (c TestCLI) RunWithCAArgs(origArgs []string) { if err := func() error { args := append([]string(nil), origArgs[:1]...) 
- if c.TestServer != nil { + if c.Server != nil { args = append(args, fmt.Sprintf("--ca-key=%s", filepath.Join(c.certsDir, certnames.EmbeddedCAKey))) args = append(args, fmt.Sprintf("--certs-dir=%s", c.certsDir)) } diff --git a/pkg/cli/userfiletable_test.go b/pkg/cli/userfiletable_test.go index f9d92772a9e..71931a00b6f 100644 --- a/pkg/cli/userfiletable_test.go +++ b/pkg/cli/userfiletable_test.go @@ -471,7 +471,7 @@ func TestUserFileUploadRecursive(t *testing.T) { if err != nil { return err } - checkUserFileContent(ctx, t, c.ExecutorConfig(), username.RootUserName(), + checkUserFileContent(ctx, t, c.Server.ExecutorConfig(), username.RootUserName(), destinationFileURI, fileContent) return nil }) @@ -529,7 +529,7 @@ func TestUserFileUpload(t *testing.T) { destination)) require.NoError(t, err) - checkUserFileContent(ctx, t, c.ExecutorConfig(), username.RootUserName(), + checkUserFileContent(ctx, t, c.Server.ExecutorConfig(), username.RootUserName(), constructUserfileDestinationURI("", destination, username.RootUserName()), tc.fileContent) }) @@ -540,7 +540,7 @@ func TestUserFileUpload(t *testing.T) { destination)) require.NoError(t, err) - checkUserFileContent(ctx, t, c.ExecutorConfig(), username.RootUserName(), + checkUserFileContent(ctx, t, c.Server.ExecutorConfig(), username.RootUserName(), destination, tc.fileContent) }) @@ -552,7 +552,7 @@ func TestUserFileUpload(t *testing.T) { destination)) require.NoError(t, err) - checkUserFileContent(ctx, t, c.ExecutorConfig(), username.RootUserName(), + checkUserFileContent(ctx, t, c.Server.ExecutorConfig(), username.RootUserName(), destination, tc.fileContent) }) @@ -607,7 +607,7 @@ func TestUserFileUploadExistingFile(t *testing.T) { require.Contains(t, out, "successfully uploaded to userfile://defaultdb.public.foo/test/file.csv") checkUserFileContent( - ctx, t, c.ExecutorConfig(), username.RootUserName(), destination, contents, + ctx, t, c.Server.ExecutorConfig(), username.RootUserName(), destination, contents, ) out, err = c.RunWithCapture(fmt.Sprintf("userfile upload %s %s", filePath, destination)) @@ -846,7 +846,7 @@ func TestUsernameUserfileInteraction(t *testing.T) { err := os.WriteFile(localFilePath, []byte("a"), 0666) require.NoError(t, err) - rootURL, cleanup := sqlutils.PGUrl(t, c.AdvSQLAddr(), t.Name(), + rootURL, cleanup := sqlutils.PGUrl(t, c.Server.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() @@ -885,7 +885,8 @@ func TestUsernameUserfileInteraction(t *testing.T) { err = conn.Exec(ctx, privsUserQuery) require.NoError(t, err) - userURL, cleanup2 := sqlutils.PGUrlWithOptionalClientCerts(t, c.AdvSQLAddr(), t.Name(), + userURL, cleanup2 := sqlutils.PGUrlWithOptionalClientCerts(t, + c.Server.AdvSQLAddr(), t.Name(), url.UserPassword(tc.username, "a"), false) defer cleanup2() @@ -896,7 +897,7 @@ func TestUsernameUserfileInteraction(t *testing.T) { user, err := username.MakeSQLUsernameFromUserInput(tc.username, username.PurposeCreation) require.NoError(t, err) uri := constructUserfileDestinationURI("", tc.name, user) - checkUserFileContent(ctx, t, c.ExecutorConfig(), user, uri, fileContent) + checkUserFileContent(ctx, t, c.Server.ExecutorConfig(), user, uri, fileContent) checkListedFiles(t, c, "", fmt.Sprintf("--url=%s", userURL.String()), []string{tc.name}) diff --git a/pkg/cli/zip_test.go b/pkg/cli/zip_test.go index b405a17b684..6db5297d2a6 100644 --- a/pkg/cli/zip_test.go +++ b/pkg/cli/zip_test.go @@ -33,7 +33,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" 
"github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" @@ -225,8 +224,9 @@ func TestConcurrentZip(t *testing.T) { // Zip it. We fake a CLI test context for this. c := TestCLI{ - t: t, - TestServer: tc.Server(0).(*server.TestServer), + t: t, + Server: tc.Server(0), + Insecure: true, } defer func(prevStderr *os.File) { stderr = prevStderr }(stderr) stderr = os.Stdout @@ -347,8 +347,9 @@ func TestUnavailableZip(t *testing.T) { // Zip it. We fake a CLI test context for this. c := TestCLI{ - t: t, - TestServer: tc.Server(0).(*server.TestServer), + t: t, + Server: tc.Server(0), + Insecure: true, } defer func(prevStderr *os.File) { stderr = prevStderr }(stderr) stderr = os.Stdout @@ -452,8 +453,9 @@ func TestPartialZip(t *testing.T) { // Zip it. We fake a CLI test context for this. c := TestCLI{ - t: t, - TestServer: tc.Server(0).(*server.TestServer), + t: t, + Server: tc.Server(0), + Insecure: true, } defer func(prevStderr *os.File) { stderr = prevStderr }(stderr) stderr = os.Stdout diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go index 81e633e0d63..6ebde819737 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go @@ -32,7 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -53,7 +52,7 @@ import ( // starting a TestServer, which creates a "real" node and employs a // distributed sender server-side. 
-func startNoSplitMergeServer(t *testing.T) (*server.TestServer, *kv.DB) { +func startNoSplitMergeServer(t *testing.T) (serverutils.TestServerInterface, *kv.DB) { s, _, db := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ @@ -62,7 +61,7 @@ func startNoSplitMergeServer(t *testing.T) (*server.TestServer, *kv.DB) { }, }, }) - return s.(*server.TestServer), db + return s, db } // TestRangeLookupWithOpenTransaction verifies that range lookups are @@ -4045,11 +4044,10 @@ func TestTxnCoordSenderRetriesAcrossEndTxn(t *testing.T) { for _, tc := range testCases { t.Run("", func(t *testing.T) { - si, _, db := serverutils.StartServer(t, - base.TestServerArgs{Knobs: base.TestingKnobs{Store: &storeKnobs}}) ctx := context.Background() - defer si.Stopper().Stop(ctx) - s := si.(*server.TestServer) + s, _, db := serverutils.StartServer(t, + base.TestServerArgs{Knobs: base.TestingKnobs{Store: &storeKnobs}}) + defer s.Stopper().Stop(ctx) keyA, keyA1, keyB, keyB1 := roachpb.Key("a"), roachpb.Key("a1"), roachpb.Key("b"), roachpb.Key("b1") require.NoError(t, setupMultipleRanges(ctx, db, string(keyB))) diff --git a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go index 84cc5b24fdf..88501088f43 100644 --- a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go @@ -25,7 +25,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -1924,15 +1923,14 @@ func TestAddSSTableSSTTimestampToRequestTimestampRespectsClosedTS(t *testing.T) defer log.Scope(t).Close(t) ctx := context.Background() - si, _, db := serverutils.StartServer(t, base.TestServerArgs{ + s, _, db := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableCanAckBeforeApplication: true, }, }, }) - defer si.Stopper().Stop(ctx) - s := si.(*server.TestServer) + defer s.Stopper().Stop(ctx) // Issue a write to trigger a closed timestamp. 
require.NoError(t, db.Put(ctx, "someKey", "someValue")) diff --git a/pkg/kv/kvserver/batcheval/cmd_delete_range_gchint_test.go b/pkg/kv/kvserver/batcheval/cmd_delete_range_gchint_test.go index e1a1c4b8710..ce074a705a1 100644 --- a/pkg/kv/kvserver/batcheval/cmd_delete_range_gchint_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_delete_range_gchint_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -31,7 +30,7 @@ func TestDeleteRangeTombstoneSetsGCHint(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -39,10 +38,9 @@ func TestDeleteRangeTombstoneSetsGCHint(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) key := roachpb.Key("b") @@ -58,7 +56,7 @@ func TestDeleteRangeTombstoneSetsGCHint(t *testing.T) { }, Value: roachpb.MakeValueFromBytes(content), } - if _, pErr := kv.SendWrapped(ctx, s.DistSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, s.DistSenderI().(kv.Sender), pArgs); pErr != nil { t.Fatal(pErr) } @@ -73,7 +71,7 @@ func TestDeleteRangeTombstoneSetsGCHint(t *testing.T) { EndKey: r.EndKey.AsRawKey(), }, } - if _, pErr := kv.SendWrapped(ctx, s.DistSender(), drArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, s.DistSenderI().(kv.Sender), drArgs); pErr != nil { t.Fatal(pErr) } diff --git a/pkg/kv/kvserver/batcheval/knobs_use_range_tombstones_test.go b/pkg/kv/kvserver/batcheval/knobs_use_range_tombstones_test.go index 5928da4a4f6..7ab0ac7ddc8 100644 --- a/pkg/kv/kvserver/batcheval/knobs_use_range_tombstones_test.go +++ b/pkg/kv/kvserver/batcheval/knobs_use_range_tombstones_test.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/storageutils" @@ -35,7 +34,7 @@ func TestKnobsUseRangeTombstonesForPointDeletes(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv, _, db := serverutils.StartServer(t, base.TestServerArgs{ + s, _, db := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -46,10 +45,9 @@ func TestKnobsUseRangeTombstonesForPointDeletes(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) eng := store.TODOEngine() txn := db.NewTxn(ctx, "test") diff --git a/pkg/kv/kvserver/client_atomic_membership_change_test.go b/pkg/kv/kvserver/client_atomic_membership_change_test.go 
index 4b6568bfafe..c68dba36671 100644 --- a/pkg/kv/kvserver/client_atomic_membership_change_test.go +++ b/pkg/kv/kvserver/client_atomic_membership_change_test.go @@ -69,7 +69,7 @@ func TestAtomicReplicationChange(t *testing.T) { testutils.SucceedsSoon(t, func() error { var sawStores []roachpb.StoreID for _, s := range tc.Servers { - r, _, _ := s.Stores().GetReplicaForRangeID(ctx, desc.RangeID) + r, _, _ := s.GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, desc.RangeID) if r == nil { continue } diff --git a/pkg/kv/kvserver/client_decommission_test.go b/pkg/kv/kvserver/client_decommission_test.go index 04fb4797773..f350e39f4d0 100644 --- a/pkg/kv/kvserver/client_decommission_test.go +++ b/pkg/kv/kvserver/client_decommission_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/serverpb" @@ -70,7 +71,7 @@ func TestDecommission(t *testing.T) { attempt++ desc := tc.LookupRangeOrFatal(t, k) for _, rDesc := range desc.Replicas().VoterDescriptors() { - store, err := tc.Servers[int(rDesc.NodeID-1)].Stores().GetStore(rDesc.StoreID) + store, err := tc.Servers[int(rDesc.NodeID-1)].GetStores().(*kvserver.Stores).GetStore(rDesc.StoreID) require.NoError(t, err) if err := store.ForceReplicationScanAndProcess(); err != nil { return err diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index a85dbd5364d..910a0288d11 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -129,7 +129,7 @@ func TestGossipNodeLivenessOnLeaseChange(t *testing.T) { // Turn off liveness heartbeats on all nodes to ensure that updates to node // liveness are not triggering gossiping. for _, s := range tc.Servers { - pErr := s.Stores().VisitStores(func(store *kvserver.Store) error { + pErr := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.GetStoreConfig().NodeLiveness.PauseHeartbeatLoopForTest() return nil }) @@ -142,7 +142,7 @@ func TestGossipNodeLivenessOnLeaseChange(t *testing.T) { initialServerId := -1 for i, s := range tc.Servers { - pErr := s.Stores().VisitStores(func(store *kvserver.Store) error { + pErr := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { if store.Gossip().InfoOriginatedHere(nodeLivenessKey) { initialServerId = i } @@ -617,7 +617,7 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { manualClock.Pause() // Write a key. - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), incrementArgs(key, 1)) + _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), incrementArgs(key, 1)) require.Nil(t, pErr) // Determine when to read. 
@@ -631,7 +631,7 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { ba := &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(getArgs(key)) - br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) + br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.Equal(t, readTS, br.Timestamp) v, err := br.Responses[0].GetGet().Value.GetInt() @@ -649,7 +649,7 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { ba = &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(incrementArgs(key, 1)) - br, pErr = tc.Servers[0].DistSender().Send(ctx, ba) + br, pErr = tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.NotEqual(t, readTS, br.Timestamp) require.True(t, readTS.Less(br.Timestamp)) diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index 35946d3b0f2..85f9044aad3 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -31,7 +31,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" - "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/isolation" @@ -293,7 +292,7 @@ func mergeWithData(t *testing.T, retries int64) { return nil } - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -305,9 +304,8 @@ func mergeWithData(t *testing.T, retries int64) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) scratchKey, err := s.ScratchRangeWithExpirationLease() @@ -700,7 +698,7 @@ func mergeCheckingTimestampCaches( } else { funcs = partitionedLeaderFuncs } - tc.Servers[i].RaftTransport().ListenIncomingRaftMessages(s.StoreID(), &unreliableRaftHandler{ + tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s.StoreID(), &unreliableRaftHandler{ rangeID: lhsDesc.GetRangeID(), IncomingRaftMessageHandler: s, unreliableRaftHandlerFuncs: funcs, @@ -812,7 +810,7 @@ func mergeCheckingTimestampCaches( } else { h = s } - tc.Servers[i].RaftTransport().ListenIncomingRaftMessages(s.StoreID(), h) + tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s.StoreID(), h) } close(filterMu.blockHBAndGCs) filterMu.Lock() @@ -965,7 +963,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { }, }) defer tc.Stopper().Stop(context.Background()) - distSender := tc.Servers[0].DistSender() + distSender := tc.Servers[0].DistSenderI().(kv.Sender) for _, key := range []roachpb.Key{scratchKey("a"), scratchKey("b")} { if _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(key)); pErr != nil { @@ -1622,7 +1620,7 @@ func TestStoreRangeMergeSplitRace_MergeWins(t *testing.T) { scratch := tc.ScratchRange(t) store := tc.GetFirstStoreFromServer(t, 0) - distSender := tc.Servers[0].DistSender() + distSender := tc.Servers[0].DistSenderI().(kv.Sender) lhsDesc, rhsDesc, err := createSplitRanges(ctx, scratch, store) if err != nil { @@ -1674,7 +1672,7 @@ func TestStoreRangeMergeSplitRace_SplitWins(t *testing.T) { ctx := 
context.Background() - var distSender *kvcoord.DistSender + var distSender kv.Sender var lhsDescKey atomic.Value var lhsStartKey atomic.Value var launchSplit int64 @@ -1724,7 +1722,7 @@ func TestStoreRangeMergeSplitRace_SplitWins(t *testing.T) { defer tc.Stopper().Stop(context.Background()) scratch := tc.ScratchRange(t) store := tc.GetFirstStoreFromServer(t, 0) - distSender = tc.Servers[0].DistSender() + distSender = tc.Servers[0].DistSenderI().(kv.Sender) lhsDesc, _, err := createSplitRanges(ctx, scratch, store) if err != nil { @@ -2110,7 +2108,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { mergeErr := make(chan error, 1) _ = tc.Stopper().RunAsyncTask(ctx, "merge", func(context.Context) { args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), args) + _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), args) mergeErr <- pErr.GoError() }) @@ -2150,7 +2148,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { ba.Timestamp = lhsClosedTS.Prev() ba.RangeID = lhsDesc.RangeID ba.Add(incrementArgs(rhsDesc.StartKey.AsRawKey().Next(), 1)) - br, pErr := tc.Servers[1].DistSender().Send(ctx, ba) + br, pErr := tc.Servers[1].DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.NotEqual(t, ba.Timestamp, br.Timestamp, "write timestamp not bumped") require.True(t, lhsClosedTS.Less(br.Timestamp), "write timestamp not bumped above closed timestamp") @@ -2290,7 +2288,7 @@ func TestStoreRangeMergeConcurrentRequests(t *testing.T) { } ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -2303,9 +2301,8 @@ func TestStoreRangeMergeConcurrentRequests(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) key, err := s.ScratchRangeWithExpirationLease() @@ -2738,7 +2735,7 @@ func TestStoreRangeMergeSlowUnabandonedFollower_WithSplit(t *testing.T) { // Start dropping all Raft traffic to the LHS on store2 so that it won't be // aware that there is a merge in progress. - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: lhsDesc.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -2901,7 +2898,7 @@ func TestStoreRangeMergeAbandonedFollowers(t *testing.T) { keys := []roachpb.RKey{scratchRKey("a"), scratchRKey("b"), scratchRKey("c")} for _, key := range keys { splitArgs := adminSplitArgs(key.AsRawKey()) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); pErr != nil { t.Fatal(pErr) } } @@ -2927,7 +2924,7 @@ func TestStoreRangeMergeAbandonedFollowers(t *testing.T) { // Merge all three ranges together. store2 won't hear about this merge. 
for i := 0; i < 2; i++ { - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), adminMergeArgs(scratchKey("a"))); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), adminMergeArgs(scratchKey("a"))); pErr != nil { t.Fatal(pErr) } } @@ -3019,7 +3016,7 @@ func TestStoreRangeMergeAbandonedFollowersAutomaticallyGarbageCollected(t *testi // Start dropping all Raft traffic to the LHS replica on store2 so that it // won't be aware that there is a merge in progress. - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: lhsDesc.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -3294,7 +3291,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { defer tc.Stopper().Stop(ctx) tc.ScratchRange(t) store0, store2 := tc.GetFirstStoreFromServer(t, 0), tc.GetFirstStoreFromServer(t, 2) - distSender := tc.Servers[0].DistSender() + distSender := tc.Servers[0].DistSenderI().(kv.Sender) split := func(key roachpb.RKey) roachpb.RangeID { t.Helper() @@ -3336,7 +3333,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { }, }, } - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, unreliableHandler) + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, unreliableHandler) // Perform the split of A, now that store2 won't be able to initialize its // replica of A. @@ -3350,7 +3347,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { IncomingRaftMessageHandler: unreliableHandler, } defer slowSnapHandler.unblock() - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, slowSnapHandler) + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, slowSnapHandler) // Remove the replica of range 1 on store2. If we were to leave it in place, // store2 would refuse to GC its replica of C after the merge commits, because @@ -3649,7 +3646,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { testKeys := []roachpb.RKey{aKey, bKey, cKey} for _, key := range testKeys { splitArgs := adminSplitArgs(key.AsRawKey()) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); pErr != nil { t.Fatal(pErr) } } @@ -3661,7 +3658,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { // to B while its blocked because of a stale DistSender cache. for _, key := range testKeys { for _, server := range tc.Servers { - if _, pErr := kv.SendWrapped(ctx, server.DistSender(), getArgs(key.AsRawKey())); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, server.DistSenderI().(kv.Sender), getArgs(key.AsRawKey())); pErr != nil { t.Fatal(pErr) } } @@ -3676,7 +3673,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { // Merge A <- B. mergeArgs := adminMergeArgs(aKey.AsRawKey()) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), mergeArgs); pErr != nil { t.Fatal(pErr) } @@ -3690,13 +3687,13 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { }() // Merge AB <- C. 
- if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), mergeArgs); pErr != nil { t.Fatal(pErr) } // Synchronously ensure that the intent on meta2CKey has been cleaned up. // The merge committed, but the intent resolution happens asynchronously. - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), getArgs(meta2CKey)) + _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), getArgs(meta2CKey)) if pErr != nil { t.Fatal(pErr) } @@ -3965,7 +3962,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { store0, store2 := tc.GetFirstStoreFromServer(t, 0), tc.GetFirstStoreFromServer(t, 2) sendingEng = store0.TODOEngine() receivingEng = store2.TODOEngine() - distSender := tc.Servers[0].DistSender() + distSender := tc.Servers[0].DistSenderI().(kv.Sender) // This test works across 5 ranges in total. We start with a scratch range(1) // [Start, End). We then split this range as follows: @@ -4009,7 +4006,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { aRepl0 := store0.LookupReplica(roachpb.RKey(keyA)) // Start dropping all Raft traffic to the first range on store2. - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: aRepl0.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -4052,7 +4049,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { // Restore Raft traffic to the LHS on store2. log.Infof(ctx, "restored traffic to store 2") - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: aRepl0.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -4120,7 +4117,7 @@ func TestStoreRangeMergeDuringShutdown(t *testing.T) { ctx := context.Background() // Install a filter that triggers a shutdown when stop is non-zero and the // rhsDesc requests a new lease. 
- var s *server.TestServer + var s serverutils.TestServerInterface var state struct { syncutil.Mutex rhsDesc *roachpb.RangeDescriptor @@ -4148,7 +4145,7 @@ func TestStoreRangeMergeDuringShutdown(t *testing.T) { } manualClock := hlc.NewHybridManualClock() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s = serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -4161,9 +4158,8 @@ func TestStoreRangeMergeDuringShutdown(t *testing.T) { }, }, }) - s = serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) key, err := s.ScratchRangeWithExpirationLease() @@ -5294,7 +5290,7 @@ func TestStoreMergeGCHint(t *testing.T) { } { t.Run(d.name, func(t *testing.T) { ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -5302,9 +5298,8 @@ func TestStoreMergeGCHint(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) leftKey := roachpb.Key("a") diff --git a/pkg/kv/kvserver/client_metrics_test.go b/pkg/kv/kvserver/client_metrics_test.go index ba84e7e4df9..306b0aedad6 100644 --- a/pkg/kv/kvserver/client_metrics_test.go +++ b/pkg/kv/kvserver/client_metrics_test.go @@ -186,10 +186,9 @@ func TestStoreResolveMetrics(t *testing.T) { } ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - s := serv.(*server.TestServer) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) key, err := s.ScratchRange() diff --git a/pkg/kv/kvserver/client_migration_test.go b/pkg/kv/kvserver/client_migration_test.go index f1f60ad42d1..9c9d2166b86 100644 --- a/pkg/kv/kvserver/client_migration_test.go +++ b/pkg/kv/kvserver/client_migration_test.go @@ -86,7 +86,7 @@ func TestStorePurgeOutdatedReplicas(t *testing.T) { for _, node := range []int{n2, n3} { ts := tc.Servers[node] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -108,7 +108,7 @@ func TestStorePurgeOutdatedReplicas(t *testing.T) { } ts := tc.Servers[n2] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -220,7 +220,7 @@ func TestMigrateWithInflightSnapshot(t *testing.T) { for _, node := range []int{n1, n2} { ts := tc.Servers[node] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -274,7 +274,7 @@ func TestMigrateWaitsForApplication(t *testing.T) { for _, node := range []int{n1, n2, n3} { ts := tc.Servers[node] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := 
ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -307,7 +307,7 @@ func TestMigrateWaitsForApplication(t *testing.T) { for _, node := range []int{n1, n2, n3} { ts := tc.Servers[node] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } diff --git a/pkg/kv/kvserver/client_mvcc_gc_test.go b/pkg/kv/kvserver/client_mvcc_gc_test.go index a8d176636b6..b94b67e8987 100644 --- a/pkg/kv/kvserver/client_mvcc_gc_test.go +++ b/pkg/kv/kvserver/client_mvcc_gc_test.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -35,13 +34,12 @@ func TestMVCCGCCorrectStats(t *testing.T) { ctx := context.Background() var args base.TestServerArgs args.Knobs.Store = &kvserver.StoreTestingKnobs{DisableCanAckBeforeApplication: true} - serv := serverutils.StartServerOnly(t, args) - s := serv.(*server.TestServer) + s := serverutils.StartServerOnly(t, args) defer s.Stopper().Stop(ctx) key, err := s.ScratchRange() require.NoError(t, err) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) repl := store.LookupReplica(roachpb.RKey(key)) diff --git a/pkg/kv/kvserver/client_raft_helpers_test.go b/pkg/kv/kvserver/client_raft_helpers_test.go index 749b145bf63..c0c736b76eb 100644 --- a/pkg/kv/kvserver/client_raft_helpers_test.go +++ b/pkg/kv/kvserver/client_raft_helpers_test.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -181,7 +181,7 @@ type testClusterStoreRaftMessageHandler struct { func (h *testClusterStoreRaftMessageHandler) getStore() (*kvserver.Store, error) { ts := h.tc.Servers[h.storeIdx] - return ts.Stores().GetStore(ts.GetFirstStoreID()) + return ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) } func (h *testClusterStoreRaftMessageHandler) HandleRaftRequest( @@ -302,7 +302,7 @@ func setupPartitionedRangeWithHandlers( pr.mu.partitionedNodeIdx = partitionedNodeIdx if replicaID == 0 { ts := tc.Servers[partitionedNodeIdx] - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { return nil, err } @@ -383,7 +383,7 @@ func setupPartitionedRangeWithHandlers( } } pr.handlers = append(pr.handlers, h) - tc.Servers[s].RaftTransport().ListenIncomingRaftMessages(tc.Target(s).StoreID, h) + tc.Servers[s].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, h) } return pr, nil } @@ -423,7 +423,7 @@ func (pr *testClusterPartitionedRange) extend( // This will replace the previous message handler, if any. 
func dropRaftMessagesFrom( t *testing.T, - srv *server.TestServer, + srv serverutils.TestServerInterface, rangeID roachpb.RangeID, fromReplicaIDs []roachpb.ReplicaID, cond *atomic.Bool, @@ -436,9 +436,9 @@ func dropRaftMessagesFrom( return rID == rangeID && (cond == nil || cond.Load()) && dropFrom[from] } - store, err := srv.Stores().GetStore(srv.GetFirstStoreID()) + store, err := srv.GetStores().(*kvserver.Stores).GetStore(srv.GetFirstStoreID()) require.NoError(t, err) - srv.RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + srv.RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: rangeID, IncomingRaftMessageHandler: store, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index 47e7283edf1..2fe6f538ef0 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -948,7 +948,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { return hb.FromReplicaID == partReplDesc.ReplicaID } } - tc.Servers[s].RaftTransport().ListenIncomingRaftMessages(tc.Target(s).StoreID, h) + tc.Servers[s].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, h) } // Perform a series of writes on the partitioned replica. The writes will @@ -1044,7 +1044,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { // Remove the partition. Snapshot should follow. log.Infof(ctx, "test: removing the partition") for _, s := range []int{0, 1, 2} { - tc.Servers[s].RaftTransport().ListenIncomingRaftMessages(tc.Target(s).StoreID, &unreliableRaftHandler{ + tc.Servers[s].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, &unreliableRaftHandler{ rangeID: partRepl.RangeID, IncomingRaftMessageHandler: tc.GetFirstStoreFromServer(t, s), unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -1075,7 +1075,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { // Perform another write. The partitioned replica should be able to receive // replicated updates. incArgs = incrementArgs(key, incC) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, key, []int64{incABC, incABC, incABC}) @@ -1211,7 +1211,7 @@ func TestRequestsOnLaggingReplica(t *testing.T) { t.Fatalf("expected leader to be 1 or 2, was: %d", leaderReplicaID) } leaderNodeIdx := int(leaderReplicaID - 1) - leaderNode := tc.Server(leaderNodeIdx).(*server.TestServer) + leaderNode := tc.Server(leaderNodeIdx) leaderStore, err := leaderNode.GetStores().(*kvserver.Stores).GetStore(leaderNode.GetFirstStoreID()) require.NoError(t, err) @@ -2823,7 +2823,7 @@ func TestReportUnreachableHeartbeats(t *testing.T) { // Shut down a raft transport via the circuit breaker, and wait for two // election timeouts to trigger an election if reportUnreachable broke // heartbeat transmission to the other store. 
- b, ok := tc.Servers[followerIdx].RaftTransport().GetCircuitBreaker( + b, ok := tc.Servers[followerIdx].RaftTransport().(*kvserver.RaftTransport).GetCircuitBreaker( tc.Target(followerIdx).NodeID, rpc.DefaultClass) require.True(t, ok) undo := circuit.TestingSetTripped(b, errors.New("boom")) @@ -2913,7 +2913,7 @@ func TestReportUnreachableRemoveRace(t *testing.T) { var undos []func() for i := range tc.Servers { if i != partitionedMaybeLeaseholderIdx { - b, ok := tc.Servers[i].RaftTransport().GetCircuitBreaker(tc.Target(partitionedMaybeLeaseholderIdx).NodeID, rpc.DefaultClass) + b, ok := tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).GetCircuitBreaker(tc.Target(partitionedMaybeLeaseholderIdx).NodeID, rpc.DefaultClass) require.True(t, ok) undos = append(undos, circuit.TestingSetTripped(b, errors.New("boom"))) } @@ -3138,7 +3138,7 @@ func TestRaftAfterRemoveRange(t *testing.T) { StoreID: target2.StoreID, } - tc.Servers[2].RaftTransport().SendAsync(&kvserverpb.RaftMessageRequest{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ ToReplica: replica1, FromReplica: replica2, Heartbeats: []kvserverpb.RaftHeartbeat{ @@ -3314,8 +3314,8 @@ func TestReplicaGCRace(t *testing.T) { toStore := tc.GetFirstStoreFromServer(t, 2) // Prevent the victim replica from processing configuration changes. - tc.Servers[2].RaftTransport().StopIncomingRaftMessages(toStore.Ident.StoreID) - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(toStore.Ident.StoreID, &noConfChangeTestHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).StopIncomingRaftMessages(toStore.Ident.StoreID) + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(toStore.Ident.StoreID, &noConfChangeTestHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: toStore, }) @@ -3736,7 +3736,7 @@ func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) { // established after the first node's removal. value := int64(5) incArgs := incrementArgs(key, value) - if _, err := kv.SendWrapped(ctx, tc.Servers[1].DistSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(ctx, tc.Servers[1].DistSenderI().(kv.Sender), incArgs); err != nil { t.Fatal(err) } @@ -4341,7 +4341,7 @@ func TestUninitializedReplicaRemainsQuiesced(t *testing.T) { } s2, err := tc.Server(1).GetStores().(*kvserver.Stores).GetStore(tc.Server(1).GetFirstStoreID()) require.NoError(t, err) - tc.Servers[1].RaftTransport().ListenIncomingRaftMessages(s2.StoreID(), &unreliableRaftHandler{ + tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s2.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: s2, unreliableRaftHandlerFuncs: handlerFuncs, @@ -4516,7 +4516,7 @@ func TestStoreRangeWaitForApplication(t *testing.T) { defer tc.Stopper().Stop(ctx) store0, store2 := tc.GetFirstStoreFromServer(t, 0), tc.GetFirstStoreFromServer(t, 2) - distSender := tc.Servers[0].DistSender() + distSender := tc.Servers[0].DistSenderI().(kv.Sender) key := []byte("a") tc.SplitRangeOrFatal(t, key) @@ -4704,7 +4704,7 @@ func TestStoreWaitForReplicaInit(t *testing.T) { var repl *kvserver.Replica testutils.SucceedsSoon(t, func() (err error) { // Try several times, as the message may be dropped (see #18355). 
- tc.Servers[0].RaftTransport().SendAsync(&kvserverpb.RaftMessageRequest{ + tc.Servers[0].RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ ToReplica: roachpb.ReplicaDescriptor{ NodeID: store.Ident.NodeID, StoreID: store.Ident.StoreID, @@ -4760,7 +4760,7 @@ func TestTracingDoesNotRaceWithCancelation(t *testing.T) { require.Nil(t, err) for i := 0; i < 3; i++ { - tc.Servers[i].RaftTransport().ListenIncomingRaftMessages(tc.Target(i).StoreID, &unreliableRaftHandler{ + tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(i).StoreID, &unreliableRaftHandler{ rangeID: ri.Desc.RangeID, IncomingRaftMessageHandler: tc.GetFirstStoreFromServer(t, i), unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -5687,7 +5687,7 @@ func TestElectionAfterRestart(t *testing.T) { var err error var lastIndex kvpb.RaftIndex for _, srv := range tc.Servers { - _ = srv.Stores().VisitStores(func(s *kvserver.Store) error { + _ = srv.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { s.VisitReplicas(func(replica *kvserver.Replica) (more bool) { if replica.RangeID != rangeID { return @@ -5712,7 +5712,7 @@ func TestElectionAfterRestart(t *testing.T) { return nil }) for _, srv := range tc.Servers { - require.NoError(t, srv.Stores().VisitStores(func(s *kvserver.Store) error { + require.NoError(t, srv.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { return s.TODOEngine().Flush() })) } @@ -5818,7 +5818,7 @@ func TestRaftSnapshotsWithMVCCRangeKeys(t *testing.T) { // Read them back from all stores. for _, srv := range tc.Servers { - store, err := srv.Stores().GetStore(srv.GetFirstStoreID()) + store, err := srv.GetStores().(*kvserver.Stores).GetStore(srv.GetFirstStoreID()) require.NoError(t, err) require.Equal(t, kvs{ rangeKVWithTS("a", "b", ts1, storage.MVCCValue{}), diff --git a/pkg/kv/kvserver/client_relocate_range_test.go b/pkg/kv/kvserver/client_relocate_range_test.go index eb1869986d9..b9d4f1b126a 100644 --- a/pkg/kv/kvserver/client_relocate_range_test.go +++ b/pkg/kv/kvserver/client_relocate_range_test.go @@ -462,7 +462,7 @@ func TestReplicaRemovalDuringGet(t *testing.T) { // Perform write. pArgs := putArgs(key, []byte("foo")) - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), pArgs) + _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), pArgs) require.Nil(t, pErr) // Perform delayed read during replica removal. @@ -488,7 +488,7 @@ func TestReplicaRemovalDuringCPut(t *testing.T) { // Perform write. pArgs := putArgs(key, []byte("foo")) - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), pArgs) + _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), pArgs) require.Nil(t, pErr) // Perform delayed conditional put during replica removal. 
This will cause @@ -564,7 +564,7 @@ func setupReplicaRemovalTest( srv := tc.Servers[0] err := srv.Stopper().RunAsyncTask(ctx, "request", func(ctx context.Context) { reqCtx := context.WithValue(ctx, magicKey{}, struct{}{}) - resp, pErr := kv.SendWrapped(reqCtx, srv.DistSender(), req) + resp, pErr := kv.SendWrapped(reqCtx, srv.DistSenderI().(kv.Sender), req) resultC <- result{resp, pErr} }) require.NoError(t, err) diff --git a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go index 44e6667e419..12fd0993d60 100644 --- a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go +++ b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" @@ -906,7 +906,7 @@ func (cbt *circuitBreakerTest) ExpireAllLeasesAndN1LivenessRecord( require.True(t, ok) ts := hlc.Timestamp{WallTime: self.Expiration.WallTime} - require.NoError(t, srv.Stores().VisitStores(func(s *kvserver.Store) error { + require.NoError(t, srv.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { s.VisitReplicas(func(replica *kvserver.Replica) (wantMore bool) { lease, next := replica.GetLease() if lease.Expiration != nil { @@ -1007,10 +1007,10 @@ func (cbt *circuitBreakerTest) SendCtxTS( func (cbt *circuitBreakerTest) WriteDS(idx int) error { cbt.t.Helper() put := kvpb.NewPut(cbt.repls[idx].Desc().StartKey.AsRawKey(), roachpb.MakeValueFromString("hello")) - return cbt.sendViaDistSender(cbt.Servers[idx].DistSender(), put) + return cbt.sendViaDistSender(cbt.Servers[idx].DistSenderI().(kv.Sender), put) } -func (cbt *circuitBreakerTest) sendViaDistSender(ds *kvcoord.DistSender, req kvpb.Request) error { +func (cbt *circuitBreakerTest) sendViaDistSender(ds kv.Sender, req kvpb.Request) error { cbt.t.Helper() ba := &kvpb.BatchRequest{} ba.Add(req) diff --git a/pkg/kv/kvserver/client_replica_gc_test.go b/pkg/kv/kvserver/client_replica_gc_test.go index 2b1dde8d062..f8aaa9a7a4e 100644 --- a/pkg/kv/kvserver/client_replica_gc_test.go +++ b/pkg/kv/kvserver/client_replica_gc_test.go @@ -91,7 +91,7 @@ func TestReplicaGCQueueDropReplicaDirect(t *testing.T) { require.NoError(t, tc.WaitForVoters(k, tc.Target(1), tc.Target(2))) ts := tc.Servers[1] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -171,7 +171,7 @@ func TestReplicaGCQueueDropReplicaGCOnScan(t *testing.T) { defer tc.Stopper().Stop(context.Background()) ts := tc.Servers[1] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index 10af893901b..5a897ab0166 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -162,7 +162,7 @@ func TestLeaseholdersRejectClockUpdateWithJump(t *testing.T) { manual := hlc.NewHybridManualClock() ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, 
base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manual, @@ -170,9 +170,8 @@ func TestLeaseholdersRejectClockUpdateWithJump(t *testing.T) { Store: &kvserver.StoreTestingKnobs{DisableCanAckBeforeApplication: true}, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) manual.Pause() @@ -271,7 +270,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { } manual := hlc.NewHybridManualClock() ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manual, @@ -287,9 +286,8 @@ func TestTxnPutOutOfOrder(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Put an initial value. @@ -453,16 +451,15 @@ func TestTxnReadWithinUncertaintyInterval(t *testing.T) { func testTxnReadWithinUncertaintyInterval(t *testing.T, observedTS bool, readOp string) { ctx := context.Background() manual := hlc.NewHybridManualClock() - srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manual, }, }, }) - s := srv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Split off a scratch range. @@ -633,7 +630,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // Write to key A and key B in the writer transaction. for _, key := range []roachpb.Key{keyA, keyB} { put := putArgs(key, []byte("val")) - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSender(), kvpb.Header{Txn: &writerTxn}, put) + resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &writerTxn}, put) require.Nil(t, pErr) writerTxn.Update(resp.Header().Txn) } @@ -659,7 +656,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( et.LockSpans[i].EndKey = et.LockSpans[i].Key.Next() } } - etResp, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSender(), etH, et) + etResp, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), etH, et) require.Nil(t, pErr) writerTxn.Update(etResp.Header().Txn) @@ -686,7 +683,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // transactions are always an observed timestamp from their own gateway node. for i, key := range []roachpb.Key{keyB, keyA} { get := getArgs(key.Next()) - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSender(), kvpb.Header{Txn: &readerTxn}, get) + resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &readerTxn}, get) require.Nil(t, pErr) require.Nil(t, resp.(*kvpb.GetResponse).Value) readerTxn.Update(resp.Header().Txn) @@ -738,14 +735,14 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // leading to a stale read. 
// resolve := resolveIntentArgs(roachpb.PENDING) - _, pErr = kv.SendWrapped(ctx, tc.Servers[0].DistSender(), resolve) + _, pErr = kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), resolve) require.Nil(t, pErr) } if alreadyResolved { // Resolve the committed value on key B to COMMITTED. resolve := resolveIntentArgs(roachpb.COMMITTED) - _, pErr = kv.SendWrapped(ctx, tc.Servers[0].DistSender(), resolve) + _, pErr = kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), resolve) require.Nil(t, pErr) } } @@ -754,7 +751,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // ReadWithinUncertaintyIntervalErrors. for _, key := range []roachpb.Key{keyA, keyB} { get := getArgs(key) - _, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSender(), kvpb.Header{Txn: &readerTxn}, get) + _, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &readerTxn}, get) require.NotNil(t, pErr) var rwuiErr *kvpb.ReadWithinUncertaintyIntervalError require.True(t, errors.As(pErr.GetDetail(), &rwuiErr)) @@ -835,7 +832,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // Collect an observed timestamp in that transaction from node 2. getB := getArgs(keyB) - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSender(), kvpb.Header{Txn: &txn}, getB) + resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getB) require.Nil(t, pErr) txn.Update(resp.Header().Txn) require.Len(t, txn.ObservedTimestamps, 1) @@ -860,7 +857,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // stale read. ba := &kvpb.BatchRequest{} ba.Add(putArgs(keyA, []byte("val"))) - br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) + br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) writeTs := br.Timestamp @@ -882,7 +879,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // avoid the uncertainty error. This is a good thing, as doing so would allow // for a stale read. getA := getArgs(keyA) - _, pErr = kv.SendWrappedWith(ctx, tc.Servers[1].DistSender(), kvpb.Header{Txn: &txn}, getA) + _, pErr = kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getA) require.NotNil(t, pErr) require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail()) } @@ -1011,7 +1008,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { // Write the data from a different transaction to establish the time for the // key as 10 ns in the future. - _, pErr := kv.SendWrapped(ctx, tc.Servers[2].DistSender(), putArgs(keyC, []byte("value"))) + _, pErr := kv.SendWrapped(ctx, tc.Servers[2].DistSenderI().(kv.Sender), putArgs(keyC, []byte("value"))) require.Nil(t, pErr) // Create two identical transactions. 
The first one will perform a read to a @@ -1020,7 +1017,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { txn2 := roachpb.MakeTransaction("txn2", keyA, isolation.Serializable, 1, now, maxOffset, instanceId) // Simulate a read which will cause the observed time to be set to now - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSender(), kvpb.Header{Txn: &txn}, getArgs(keyA)) + resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyA)) require.Nil(t, pErr) // The client needs to update its transaction to the returned transaction which has observed timestamps in it txn = *resp.Header().Txn @@ -1046,7 +1043,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { // Try and read the transaction from the context of a new transaction. This // will fail as expected as the observed timestamp will not be set. - _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSender(), kvpb.Header{Txn: &txn2}, getArgs(keyC)) + _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn2}, getArgs(keyC)) require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail()) // Try and read the key from the existing transaction. This should fail the @@ -1056,7 +1053,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { // - Other error (Bad) - We expect an uncertainty error so the client can choose a new timestamp and retry. // - Not found (Bad) - Error because the data was written before us. // - Found (Bad) - The write HLC timestamp is after our timestamp. - _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSender(), kvpb.Header{Txn: &txn}, getArgs(keyC)) + _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyC)) require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail()) } @@ -1205,7 +1202,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // absence would not be a true stale read. ba := &kvpb.BatchRequest{} ba.Add(putArgs(key, []byte("val"))) - br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) + br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) writeTs := br.Timestamp require.True(t, nonTxnOrigTs.Less(writeTs)) @@ -1248,7 +1245,7 @@ func TestRangeLookupUseReverse(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -1256,9 +1253,8 @@ func TestRangeLookupUseReverse(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Init test ranges: @@ -1463,7 +1459,7 @@ func setupLeaseTransferTest(t *testing.T) *leaseTransferTest { // First, do a write; we'll use it to determine when the dust has settled. 
l.leftKey = key incArgs := incrementArgs(l.leftKey, 1) - if _, pErr := kv.SendWrapped(context.Background(), l.tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), l.tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } l.replica0 = l.tc.GetFirstStoreFromServer(t, 0).LookupReplica(roachpb.RKey(key)) @@ -1916,7 +1912,7 @@ func TestLeaseExpirationBelowFutureTimeRequest(t *testing.T) { // to update the store's clock. See Replica.checkRequestTimeRLocked for // the exact determination of whether a request timestamp is too far in // the future or not. - leaseRenewal := l.tc.Servers[1].Cfg.RangeLeaseRenewalDuration() + leaseRenewal := l.tc.Servers[1].RaftConfig().RangeLeaseRenewalDuration() leaseRenewalMinusStasis := leaseRenewal - l.tc.Servers[1].Clock().MaxOffset() reqTime := now.Add(leaseRenewalMinusStasis.Nanoseconds()-10, 0) if tooFarInFuture { @@ -2003,7 +1999,7 @@ func TestRangeLocalUncertaintyLimitAfterNewLease(t *testing.T) { // Do a write on node1 to establish a key with its timestamp at now. if _, pErr := kv.SendWrapped( - ctx, tc.Servers[0].DistSender(), putArgs(keyA, []byte("value")), + ctx, tc.Servers[0].DistSenderI().(kv.Sender), putArgs(keyA, []byte("value")), ); pErr != nil { t.Fatal(pErr) } @@ -2035,7 +2031,7 @@ func TestRangeLocalUncertaintyLimitAfterNewLease(t *testing.T) { // expect to see an uncertainty interval error. h := kvpb.Header{Txn: &txn} if _, pErr := kv.SendWrappedWith( - ctx, tc.Servers[0].DistSender(), h, getArgs(keyA), + ctx, tc.Servers[0].DistSenderI().(kv.Sender), h, getArgs(keyA), ); !testutils.IsPError(pErr, "uncertainty") { t.Fatalf("expected an uncertainty interval error; got %v", pErr) } @@ -2264,7 +2260,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { } return nil } - srv := serverutils.StartServerOnly(t, + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ @@ -2274,7 +2270,6 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { }, }, }) - s := srv.(*server.TestServer) defer s.Stopper().Stop(ctx) store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) @@ -2330,7 +2325,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { } for { - leaseInfo, _, err := s.GetRangeLease(ctx, key, server.AllowQueryToBeForwardedToDifferentNode) + leaseInfo, _, err := s.GetRangeLease(ctx, key, roachpb.AllowQueryToBeForwardedToDifferentNode) if err != nil { t.Fatal(err) } @@ -2476,7 +2471,7 @@ func TestLeaseInfoRequest(t *testing.T) { // use an old, cached, version of the range descriptor that doesn't have the // local replica in it (and so the request would be routed away). // TODO(andrei): Add a batch option to not use the range cache. - s, err := tc.Servers[1].Stores().GetStore(tc.Servers[1].GetFirstStoreID()) + s, err := tc.Servers[1].GetStores().(*kvserver.Stores).GetStore(tc.Servers[1].GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -2519,7 +2514,7 @@ func TestErrorHandlingForNonKVCommand(t *testing.T) { } return nil } - srv := serverutils.StartServerOnly(t, + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ @@ -2529,7 +2524,6 @@ func TestErrorHandlingForNonKVCommand(t *testing.T) { }, }, }) - s := srv.(*server.TestServer) defer s.Stopper().Stop(context.Background()) // Send the lease request. 
@@ -2693,16 +2687,15 @@ func TestClearRange(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{Store: &kvserver.StoreTestingKnobs{ // This makes sure that our writes are visible when we go // straight to the engine to check them. DisableCanAckBeforeApplication: true, }}, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) clearRange := func(start, end roachpb.Key) { @@ -2836,7 +2829,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { ba := &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(getArgs(keyA)) - br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) + br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.Equal(t, readTS, br.Timestamp) v, err := br.Responses[0].GetGet().Value.GetInt() @@ -2856,7 +2849,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { funcs.snapErr = func(*kvserverpb.SnapshotRequest_Header) error { return errors.New("rejected") } - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ rangeID: repl0.GetRangeID(), IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: funcs, @@ -2889,7 +2882,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { // Remove the partition. A snapshot to node 2 should follow. This snapshot // will inform node 2 that it is the new leaseholder for the range. Node 2 // should act accordingly and update its internal state to reflect this. - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, store2) + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, store2) tc.WaitForValues(t, keyC, []int64{4, 4, 4}) // Attempt to write under the read on the new leaseholder. The batch @@ -2901,7 +2894,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { ba = &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(incrementArgs(keyA, 1)) - br, pErr = tc.Servers[0].DistSender().Send(ctx, ba) + br, pErr = tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.NotEqual(t, readTS, br.Timestamp) require.True(t, readTS.Less(br.Timestamp)) @@ -2994,7 +2987,7 @@ func TestLeaseTransferRejectedIfTargetNeedsSnapshot(t *testing.T) { funcs.snapErr = func(*kvserverpb.SnapshotRequest_Header) error { return errors.New("rejected") } - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ rangeID: repl0.GetRangeID(), IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: funcs, @@ -3045,7 +3038,7 @@ func TestLeaseTransferRejectedIfTargetNeedsSnapshot(t *testing.T) { require.True(t, isRejectedErr, "%+v", transferErr) // Remove the partition. A snapshot to node 2 should follow. 
- tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store2.Ident.StoreID, store2) + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, store2) tc.WaitForValues(t, keyC, []int64{4, 4, 4}) // Now that node 2 caught up on the log through a snapshot, we should be @@ -3300,7 +3293,7 @@ func TestReplicaTombstone(t *testing.T) { funcs.dropResp = func(*kvserverpb.RaftMessageResponse) bool { return true } - tc.Servers[1].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, unreliableRaftHandlerFuncs: funcs, @@ -3356,7 +3349,7 @@ func TestReplicaTombstone(t *testing.T) { raftFuncs.dropReq = func(req *kvserverpb.RaftMessageRequest) bool { return req.ToReplica.StoreID == store.StoreID() } - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, unreliableRaftHandlerFuncs: raftFuncs, @@ -3400,7 +3393,7 @@ func TestReplicaTombstone(t *testing.T) { // It will never find out it has been removed. We'll remove it // with a manual replica GC. store, _ := getFirstStoreReplica(t, tc.Server(2), key) - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, }) @@ -3439,7 +3432,7 @@ func TestReplicaTombstone(t *testing.T) { rangeID := desc.RangeID // Partition node 2 from all raft communication. store, _ := getFirstStoreReplica(t, tc.Server(2), keyA) - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, }) @@ -3520,7 +3513,7 @@ func TestReplicaTombstone(t *testing.T) { waiter.blockSnapshot = true } setMinHeartbeat(repl.ReplicaID() + 1) - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -3627,7 +3620,7 @@ func TestReplicaTombstone(t *testing.T) { raftFuncs.dropReq = func(req *kvserverpb.RaftMessageRequest) bool { return partActive.Load().(bool) && req.Message.Type == raftpb.MsgApp } - tc.Servers[2].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: lhsDesc.RangeID, unreliableRaftHandlerFuncs: raftFuncs, IncomingRaftMessageHandler: &unreliableRaftHandler{ @@ -3744,7 +3737,7 @@ func TestAdminRelocateRangeSafety(t *testing.T) { // completed. // Code above verified r1 is the leaseholder, so use it to ChangeReplicas. 
- r1, _, err := tc.Servers[0].Stores().GetReplicaForRangeID(ctx, rangeInfo.Desc.RangeID) + r1, _, err := tc.Servers[0].GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, rangeInfo.Desc.RangeID) assert.Nil(t, err) expDescAfterAdd := rangeInfo.Desc // for use with ChangeReplicas expDescAfterAdd.NextReplicaID++ diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index adc23b028d9..25b3a66c892 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -122,7 +122,7 @@ func TestStoreSplitAbortSpan(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -130,9 +130,9 @@ func TestStoreSplitAbortSpan(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) left, middle, right := roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c") @@ -264,7 +264,7 @@ func TestStoreRangeSplitInsideRow(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -272,9 +272,9 @@ func TestStoreRangeSplitInsideRow(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Manually create some the column keys corresponding to the table: @@ -333,7 +333,7 @@ func TestStoreRangeSplitIntents(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -341,9 +341,9 @@ func TestStoreRangeSplitIntents(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // First, write some values left and right of the proposed split key. @@ -411,10 +411,10 @@ func TestQueryLocksAcrossRanges(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv, _, db := serverutils.StartServer(t, base.TestServerArgs{}) - s := serv.(*server.TestServer) + s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // First, write some values left and right of the proposed split key. 
@@ -543,7 +543,7 @@ func TestStoreRangeSplitAtRangeBounds(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -551,9 +551,9 @@ func TestStoreRangeSplitAtRangeBounds(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Split range 1 at an arbitrary key. @@ -602,7 +602,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -611,9 +611,9 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) splitKey := roachpb.Key("m") @@ -764,7 +764,7 @@ func TestStoreRangeSplitMergeStats(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -773,9 +773,9 @@ func TestStoreRangeSplitMergeStats(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Split the range after the last table data key. @@ -912,7 +912,7 @@ func TestStoreEmptyRangeSnapshotSize(t *testing.T) { // no user data. splitKey := keys.SystemSQLCodec.TablePrefix(bootstrap.TestingUserDescID(0)) splitArgs := adminSplitArgs(splitKey) - if _, err := kv.SendWrapped(ctx, tc.Servers[0].DistSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); err != nil { t.Fatal(err) } @@ -932,7 +932,7 @@ func TestStoreEmptyRangeSnapshotSize(t *testing.T) { messageRecorder.headers = append(messageRecorder.headers, header) }, } - tc.Servers[1].RaftTransport().ListenIncomingRaftMessages(tc.GetFirstStoreFromServer(t, 1).StoreID(), messageHandler) + tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.GetFirstStoreFromServer(t, 1).StoreID(), messageHandler) // Replicate the newly-split range to trigger a snapshot request from store 0 // to store 1. 
@@ -970,7 +970,7 @@ func TestStoreRangeSplitStatsWithMerges(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableSplitQueue: true, @@ -978,9 +978,9 @@ func TestStoreRangeSplitStatsWithMerges(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) start := s.Clock().Now() @@ -1075,16 +1075,16 @@ func TestStoreZoneUpdateAndRangeSplit(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) tdb := sqlutils.MakeSQLRunner(sqlDB) @@ -1139,16 +1139,16 @@ func TestStoreRangeSplitWithMaxBytesUpdate(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Find the last range. @@ -1253,7 +1253,7 @@ func TestStoreRangeSplitBackpressureWrites(t *testing.T) { } ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ DefaultZoneConfigOverride: &zoneConfig, @@ -1267,9 +1267,9 @@ func TestStoreRangeSplitBackpressureWrites(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Split at the split key. @@ -1472,7 +1472,7 @@ func runSetupSplitSnapshotRace( // Split the data range. splitArgs = adminSplitArgs(roachpb.Key("m")) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); pErr != nil { t.Fatal(pErr) } @@ -1505,7 +1505,7 @@ func runSetupSplitSnapshotRace( // failure and render the range unable to achieve quorum after // restart (in the SnapshotWins branch). 
incArgs = incrementArgs(rightKey, 3) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } @@ -1513,7 +1513,7 @@ func runSetupSplitSnapshotRace( tc.WaitForValues(t, rightKey, []int64{0, 0, 0, 2, 5, 5}) // Scan the meta ranges to resolve all intents - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), &kvpb.ScanRequest{ RequestHeader: kvpb.RequestHeader{ Key: keys.MetaMin, @@ -1552,7 +1552,7 @@ func TestSplitSnapshotRace_SplitWins(t *testing.T) { // Perform a write on the left range and wait for it to propagate. incArgs := incrementArgs(leftKey, 10) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, leftKey, []int64{0, 11, 11, 11, 0, 0}) @@ -1563,7 +1563,7 @@ func TestSplitSnapshotRace_SplitWins(t *testing.T) { // Write to the right range. incArgs = incrementArgs(rightKey, 20) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, rightKey, []int64{0, 0, 0, 25, 25, 25}) @@ -1589,7 +1589,7 @@ func TestSplitSnapshotRace_SnapshotWins(t *testing.T) { // Perform a write on the right range. incArgs := incrementArgs(rightKey, 20) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } @@ -1613,13 +1613,13 @@ func TestSplitSnapshotRace_SnapshotWins(t *testing.T) { // it helps wake up dormant ranges that would otherwise have to wait // for retry timeouts. 
incArgs = incrementArgs(leftKey, 10) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, leftKey, []int64{0, 11, 11, 11, 0, 0}) incArgs = incrementArgs(rightKey, 200) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, rightKey, []int64{0, 0, 0, 225, 225, 225}) @@ -1905,7 +1905,7 @@ func TestStoreSplitGCThreshold(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -1913,9 +1913,9 @@ func TestStoreSplitGCThreshold(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) leftKey := roachpb.Key("a") @@ -1966,7 +1966,7 @@ func TestStoreSplitGCHint(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -1974,9 +1974,9 @@ func TestStoreSplitGCHint(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) leftKey := roachpb.Key("a") @@ -2124,7 +2124,7 @@ func TestStoreRangeSplitRaceUninitializedRHS(t *testing.T) { // range). splitKey := roachpb.Key(encoding.EncodeVarintDescending([]byte("a"), int64(i))) splitArgs := adminSplitArgs(splitKey) - _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSender(), splitArgs) + _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), splitArgs) errChan <- pErr }(i) go func() { @@ -2145,7 +2145,7 @@ func TestStoreRangeSplitRaceUninitializedRHS(t *testing.T) { // side in the split trigger was racing with the uninitialized // version for the same group, resulting in clobbered HardState). 
for term := uint64(1); ; term++ { - if sent := tc.Servers[1].RaftTransport().SendAsync(&kvserverpb.RaftMessageRequest{ + if sent := tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ RangeID: trigger.RightDesc.RangeID, ToReplica: replicas[0], FromReplica: replicas[1], @@ -2212,7 +2212,7 @@ func TestLeaderAfterSplit(t *testing.T) { defer tc.Stopper().Stop(ctx) store := tc.GetFirstStoreFromServer(t, 0) - sender := tc.Servers[0].DistSender() + sender := tc.Servers[0].DistSenderI().(kv.Sender) leftKey := roachpb.Key("a") splitKey := roachpb.Key("m") @@ -2237,10 +2237,10 @@ func TestLeaderAfterSplit(t *testing.T) { func BenchmarkStoreRangeSplit(b *testing.B) { ctx := context.Background() - serv := serverutils.StartServerOnly(b, base.TestServerArgs{}) - s := serv.(*server.TestServer) + s := serverutils.StartServerOnly(b, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(b, err) // Perform initial split of ranges. @@ -2341,7 +2341,7 @@ func TestStoreRangeGossipOnSplits(t *testing.T) { overrideCapacityFraction := 0.5 ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -2354,9 +2354,9 @@ func TestStoreRangeGossipOnSplits(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) storeKey := gossip.MakeStoreDescKey(store.StoreID()) @@ -2435,7 +2435,7 @@ func TestStoreTxnWaitQueueEnabledOnSplit(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -2443,9 +2443,9 @@ func TestStoreTxnWaitQueueEnabledOnSplit(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) key := bootstrap.TestingUserTableDataMin() @@ -2467,7 +2467,7 @@ func TestDistributedTxnCleanup(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -2475,9 +2475,9 @@ func TestDistributedTxnCleanup(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Split at "a". 
@@ -2591,7 +2591,7 @@ func TestUnsplittableRange(t *testing.T) { } splitQueuePurgatoryChan := make(chan time.Time, 1) - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -2612,9 +2612,9 @@ func TestUnsplittableRange(t *testing.T) { SQLStatsKnobs: &sqlstats.TestingKnobs{SkipZoneConfigBootstrap: true}, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Add a single large row to /Table/14. @@ -2695,7 +2695,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { return nil } ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -2706,9 +2706,9 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) lhsKey := roachpb.Key("a") @@ -2952,14 +2952,14 @@ func TestRangeLookupAfterMeta2Split(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, }, }, }) - s := srv.(*server.TestServer) + defer s.Stopper().Stop(ctx) // The following assumes that keys.TestingUserDescID(0) returns 50. @@ -2987,7 +2987,7 @@ func TestRangeLookupAfterMeta2Split(t *testing.T) { testutils.RunTrueAndFalse(t, "reverse", func(t *testing.T, rev bool) { // Clear the RangeDescriptorCache so that no cached descriptors are // available from previous lookups. - s.DistSender().RangeDescriptorCache().Clear() + s.DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache().Clear() // Scan from [/Table/49-/Table/50) both forwards and backwards. // Either way, the resulting RangeLookup scan will be forced to @@ -3083,7 +3083,7 @@ func TestStoreSplitRangeLookupRace(t *testing.T) { return nil } - srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableSplitQueue: true, @@ -3095,9 +3095,9 @@ func TestStoreSplitRangeLookupRace(t *testing.T) { }, }, }) - s := srv.(*server.TestServer) + defer s.Stopper().Stop(context.Background()) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -3105,7 +3105,7 @@ func TestStoreSplitRangeLookupRace(t *testing.T) { mustSplit := func(splitKey roachpb.Key) { args := adminSplitArgs(splitKey) - // Don't use s.DistSender() so that we don't disturb the RangeDescriptorCache. + // Don't use s.DistSenderI().(kv.Sender) so that we don't disturb the RangeDescriptorCache. 
rangeID := store.LookupReplica(roachpb.RKey(splitKey)).RangeID _, pErr := kv.SendWrappedWith(context.Background(), store, kvpb.Header{ RangeID: rangeID, @@ -3133,7 +3133,7 @@ func TestStoreSplitRangeLookupRace(t *testing.T) { for atomic.LoadInt32(&blockedRangeLookups) == 0 && err == nil { // Clear the RangeDescriptorCache to trigger a range lookup when the // lookupKey is next accessed. Then immediately access lookupKey. - s.DistSender().RangeDescriptorCache().Clear() + s.DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache().Clear() _, err = s.DB().Get(context.Background(), lookupKey) } rangeLookupErr <- err @@ -3179,7 +3179,7 @@ func TestRangeLookupAsyncResolveIntent(t *testing.T) { return nil } ctx := context.Background() - srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ // Disable async tasks in the intent resolver. All tasks will be synchronous. @@ -3192,9 +3192,9 @@ func TestRangeLookupAsyncResolveIntent(t *testing.T) { }, }, }) - s := srv.(*server.TestServer) + defer s.Stopper().Stop(context.Background()) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Split range 1 at an arbitrary key so that we're not dealing with the @@ -3262,16 +3262,16 @@ func TestStoreSplitDisappearingReplicas(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) go kvserver.WatchForDisappearingReplicas(t, store) for i := 0; i < 100; i++ { @@ -3390,7 +3390,7 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { }) store, _ := getFirstStoreReplica(t, tc.Server(1), k) - tc.Servers[1].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, }) @@ -3468,7 +3468,7 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { // Re-enable raft and wait for the lhs to catch up to the post-split // descriptor. This used to panic with "raft group deleted". 
- tc.Servers[1].RaftTransport().ListenIncomingRaftMessages(store.StoreID(), store) + tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), store) testutils.SucceedsSoon(t, func() error { repl, err := store.GetReplica(descLHS.RangeID) if err != nil { @@ -3509,7 +3509,7 @@ func TestSplitBlocksReadsToRHS(t *testing.T) { } ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -3518,9 +3518,9 @@ func TestSplitBlocksReadsToRHS(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) repl := store.LookupReplica(roachpb.RKey(keySplit)) @@ -3622,7 +3622,7 @@ func TestStoreRangeSplitAndMergeWithGlobalReads(t *testing.T) { } ctx := context.Background() - serv, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ DisableSpanConfigs: true, Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ @@ -3631,7 +3631,7 @@ func TestStoreRangeSplitAndMergeWithGlobalReads(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) // Set the closed_timestamp interval to be short to shorten the test duration // because we need to wait for a checkpoint on the system config. @@ -3639,7 +3639,7 @@ func TestStoreRangeSplitAndMergeWithGlobalReads(t *testing.T) { tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '20ms'`) tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '20ms'`) clock.Store(s.Clock()) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) config.TestingSetupZoneConfigHook(s.Stopper()) @@ -3865,7 +3865,7 @@ func TestLBSplitUnsafeKeys(t *testing.T) { return nil, false } - serv, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ DisableSpanConfigs: true, Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ @@ -3874,10 +3874,10 @@ func TestLBSplitUnsafeKeys(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) + defer s.Stopper().Stop(ctx) tdb := sqlutils.MakeSQLRunner(sqlDB) - store, err := s.Stores().GetStore(1) + store, err := s.GetStores().(*kvserver.Stores).GetStore(1) require.NoError(t, err) // We want to exercise the case where there are column family keys. diff --git a/pkg/kv/kvserver/consistency_queue_test.go b/pkg/kv/kvserver/consistency_queue_test.go index 39219daed2e..bf6dd52bd52 100644 --- a/pkg/kv/kvserver/consistency_queue_test.go +++ b/pkg/kv/kvserver/consistency_queue_test.go @@ -611,7 +611,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) { // The stats should magically repair themselves. We'll first do a quick check // and then a full recomputation. 
- repl, _, err := tc.Servers[0].Stores().GetReplicaForRangeID(ctx, rangeID) + repl, _, err := tc.Servers[0].GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, rangeID) require.NoError(t, err) ms := repl.GetMVCCStats() if ms.SysCount >= sysCountGarbage { diff --git a/pkg/kv/kvserver/flow_control_integration_test.go b/pkg/kv/kvserver/flow_control_integration_test.go index 0ff1c2f63e6..1465ad70e60 100644 --- a/pkg/kv/kvserver/flow_control_integration_test.go +++ b/pkg/kv/kvserver/flow_control_integration_test.go @@ -100,7 +100,7 @@ func TestFlowControlBasic(t *testing.T) { for i := 0; i < numNodes; i++ { si, err := tc.Server(i).GetStores().(*kvserver.Stores).GetStore(tc.Server(i).GetFirstStoreID()) require.NoError(t, err) - tc.Servers[i].RaftTransport().ListenIncomingRaftMessages(si.StoreID(), + tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(si.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: si, @@ -1907,7 +1907,7 @@ func TestFlowControlUnquiescedRange(t *testing.T) { for i := 0; i < numNodes; i++ { si, err := tc.Server(i).GetStores().(*kvserver.Stores).GetStore(tc.Server(i).GetFirstStoreID()) require.NoError(t, err) - tc.Servers[i].RaftTransport().ListenIncomingRaftMessages(si.StoreID(), + tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(si.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: si, diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go b/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go index 9728664041f..ec6bea20716 100644 --- a/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go +++ b/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go @@ -335,7 +335,7 @@ func TestSyncIntentResolution_ByteSizePagination(t *testing.T) { func forceScanOnAllReplicationQueues(tc *testcluster.TestCluster) (err error) { for _, s := range tc.Servers { - err = s.Stores().VisitStores(func(store *kvserver.Store) error { + err = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) } diff --git a/pkg/kv/kvserver/liveness/client_test.go b/pkg/kv/kvserver/liveness/client_test.go index e933851c783..e3fb65c5c0e 100644 --- a/pkg/kv/kvserver/liveness/client_test.go +++ b/pkg/kv/kvserver/liveness/client_test.go @@ -176,7 +176,7 @@ func TestNodeLivenessStatusMap(t *testing.T) { tc.WaitForNodeLiveness(t) log.Infof(ctx, "waiting done") - firstServer := tc.Server(0).(*server.TestServer) + firstServer := tc.Server(0) liveNodeID := firstServer.NodeID() diff --git a/pkg/kv/kvserver/node_liveness_test.go b/pkg/kv/kvserver/node_liveness_test.go index 3682e23c659..32581ded4b7 100644 --- a/pkg/kv/kvserver/node_liveness_test.go +++ b/pkg/kv/kvserver/node_liveness_test.go @@ -212,9 +212,8 @@ func TestRedundantNodeLivenessHeartbeatsAvoided(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - s := serv.(*server.TestServer) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) defer s.Stopper().Stop(ctx) @@ -520,7 +519,7 @@ func TestNodeLivenessRestart(t *testing.T) { } // Restart store and verify gossip contains liveness record for nodes 1&2. 
- require.NoError(t, tc.RestartServerWithInspect(1, func(s *server.TestServer) { + require.NoError(t, tc.RestartServerWithInspect(1, func(s serverutils.TestServerInterface) { livenessRegex := gossip.MakePrefixPattern(gossip.KeyNodeLivenessPrefix) s.GossipI().(*gossip.Gossip). RegisterCallback(livenessRegex, func(key string, _ roachpb.Value) { @@ -749,14 +748,13 @@ func TestNodeLivenessConcurrentHeartbeats(t *testing.T) { ctx := context.Background() manualClock := hlc.NewHybridManualClock() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manualClock, }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) testutils.SucceedsSoon(t, func() error { @@ -981,7 +979,7 @@ func TestNodeLivenessRetryAmbiguousResultError(t *testing.T) { return nil } ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ EvalKnobs: kvserverbase.BatchEvalTestingKnobs{ @@ -990,7 +988,6 @@ func TestNodeLivenessRetryAmbiguousResultError(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) // Verify retry of the ambiguous result for heartbeat loop. @@ -1108,7 +1105,7 @@ func TestNodeLivenessNoRetryOnAmbiguousResultCausedByCancellation(t *testing.T) <-sem return nil } - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ EvalKnobs: kvserverbase.BatchEvalTestingKnobs{ @@ -1123,7 +1120,6 @@ func TestNodeLivenessNoRetryOnAmbiguousResultCausedByCancellation(t *testing.T) }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) nl := s.NodeLiveness().(*liveness.NodeLiveness) diff --git a/pkg/kv/kvserver/range_log_test.go b/pkg/kv/kvserver/range_log_test.go index 4da5ec1baa5..05c7945a435 100644 --- a/pkg/kv/kvserver/range_log_test.go +++ b/pkg/kv/kvserver/range_log_test.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -47,9 +46,8 @@ func countEvents( func TestLogSplits(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - ts := s.(*server.TestServer) ctx := context.Background() + s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) // Count the number of split events. 
@@ -121,7 +119,7 @@ func TestLogSplits(t *testing.T) { t.Fatal(rows.Err()) } - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -163,8 +161,7 @@ func TestLogMerges(t *testing.T) { }) defer s.Stopper().Stop(ctx) - ts := s.(*server.TestServer) - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -272,7 +269,7 @@ func TestLogRebalances(t *testing.T) { // StoreID 1 is present on the testserver. If this assumption changes in the // future, *any* store will work, but a new method will need to be added to // Stores (or a creative usage of VisitStores could suffice). - store, err := s.(*server.TestServer).Stores().GetStore(roachpb.StoreID(1)) + store, err := s.GetStores().(*kvserver.Stores).GetStore(roachpb.StoreID(1)) if err != nil { t.Fatal(err) } @@ -419,11 +416,10 @@ func TestAsyncLogging(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - ts := s.(*server.TestServer) ctx := context.Background() defer s.Stopper().Stop(ctx) - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Log a fake split event inside a transaction that also writes to key a. diff --git a/pkg/kv/kvserver/replica_closedts_test.go b/pkg/kv/kvserver/replica_closedts_test.go index 91870e11657..7649ecdec21 100644 --- a/pkg/kv/kvserver/replica_closedts_test.go +++ b/pkg/kv/kvserver/replica_closedts_test.go @@ -606,7 +606,7 @@ func TestRejectedLeaseDoesntDictateClosedTimestamp(t *testing.T) { manual.Increment(remainingNanos - pause1 + 1) leaseAcqErrCh := make(chan error) go func() { - r, _, err := n2.Stores().GetReplicaForRangeID(ctx, desc.RangeID) + r, _, err := n2.GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, desc.RangeID) if err != nil { leaseAcqErrCh <- err return @@ -920,7 +920,7 @@ func testNonBlockingReadsWithReaderFn( // Reader goroutines: run one reader per store. 
for _, s := range tc.Servers { - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) g.Go(func() error { readerFn := readerFnFactory(store, scratchRange.RangeID, keySpan) diff --git a/pkg/kv/kvserver/replica_learner_test.go b/pkg/kv/kvserver/replica_learner_test.go index 60e03bf6deb..87eb20d7ce4 100644 --- a/pkg/kv/kvserver/replica_learner_test.go +++ b/pkg/kv/kvserver/replica_learner_test.go @@ -30,7 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftutil" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" @@ -773,7 +772,7 @@ func TestLearnerRaftConfState(t *testing.T) { defer log.Scope(t).Close(t) verifyLearnerInRaftOnNodes := func( - key roachpb.Key, id roachpb.ReplicaID, servers []*server.TestServer, + key roachpb.Key, id roachpb.ReplicaID, servers []serverutils.TestServerInterface, ) { t.Helper() var repls []*kvserver.Replica diff --git a/pkg/kv/kvserver/replica_probe_test.go b/pkg/kv/kvserver/replica_probe_test.go index b957fe68af0..7dae7345132 100644 --- a/pkg/kv/kvserver/replica_probe_test.go +++ b/pkg/kv/kvserver/replica_probe_test.go @@ -171,7 +171,7 @@ func TestReplicaProbeRequest(t *testing.T) { // We can also probe directly at each Replica. This is the intended use case // for Replica-level circuit breakers (#33007). for _, srv := range tc.Servers { - repl, _, err := srv.Stores().GetReplicaForRangeID(ctx, desc.RangeID) + repl, _, err := srv.GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, desc.RangeID) require.NoError(t, err) ba := &kvpb.BatchRequest{} ba.Add(probeReq) @@ -189,7 +189,7 @@ func TestReplicaProbeRequest(t *testing.T) { seen.injectedErr = injErr seen.Unlock() for _, srv := range tc.Servers { - repl, _, err := srv.Stores().GetReplicaForRangeID(ctx, desc.RangeID) + repl, _, err := srv.GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, desc.RangeID) require.NoError(t, err) ba := &kvpb.BatchRequest{} ba.Timestamp = srv.Clock().Now() diff --git a/pkg/kv/kvserver/replica_rangefeed_test.go b/pkg/kv/kvserver/replica_rangefeed_test.go index f268e35b91e..5817350d527 100644 --- a/pkg/kv/kvserver/replica_rangefeed_test.go +++ b/pkg/kv/kvserver/replica_rangefeed_test.go @@ -116,7 +116,7 @@ func TestReplicaRangefeed(t *testing.T) { defer tc.Stopper().Stop(ctx) ts := tc.Servers[0] - firstStore, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + firstStore, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -149,7 +149,7 @@ func TestReplicaRangefeed(t *testing.T) { stream := newTestStream() streams[i] = stream srv := tc.Servers[i] - store, err := srv.Stores().GetStore(srv.GetFirstStoreID()) + store, err := srv.GetStores().(*kvserver.Stores).GetStore(srv.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -298,7 +298,7 @@ func TestReplicaRangefeed(t *testing.T) { } server1 := tc.Servers[1] - store1, pErr := server1.Stores().GetStore(server1.GetFirstStoreID()) + store1, pErr := server1.GetStores().(*kvserver.Stores).GetStore(server1.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -470,7 +470,7 @@ func TestReplicaRangefeed(t *testing.T) { testutils.SucceedsSoon(t, func() error { for i := 0; i < numNodes; i++ { ts := 
tc.Servers[i] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -529,7 +529,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { ) ts := tc.Servers[0] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -622,7 +622,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { streamErrC := make(chan error, 1) rangefeedSpan := mkSpan("a", "z") ts := tc.Servers[removeStore] - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -657,7 +657,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { streamErrC := make(chan error, 1) rangefeedSpan := mkSpan("a", "z") ts := tc.Servers[0] - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -688,7 +688,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { defer tc.Stopper().Stop(ctx) ts := tc.Servers[0] - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -754,22 +754,22 @@ func TestReplicaRangefeedErrors(t *testing.T) { defer tc.Stopper().Stop(ctx) ts2 := tc.Servers[2] - partitionStore, err := ts2.Stores().GetStore(ts2.GetFirstStoreID()) + partitionStore, err := ts2.GetStores().(*kvserver.Stores).GetStore(ts2.GetFirstStoreID()) if err != nil { t.Fatal(err) } ts := tc.Servers[0] - firstStore, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + firstStore, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) } - secondStore, err := tc.Servers[1].Stores().GetStore(tc.Servers[1].GetFirstStoreID()) + secondStore, err := tc.Servers[1].GetStores().(*kvserver.Stores).GetStore(tc.Servers[1].GetFirstStoreID()) if err != nil { t.Fatal(err) } for _, server := range tc.Servers { - store, err := server.Stores().GetStore(server.GetFirstStoreID()) + store, err := server.GetStores().(*kvserver.Stores).GetStore(server.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -891,7 +891,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { defer tc.Stopper().Stop(ctx) ts := tc.Servers[0] - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -960,7 +960,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { defer tc.Stopper().Stop(ctx) ts := tc.Servers[0] - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -1034,7 +1034,7 @@ func TestReplicaRangefeedMVCCHistoryMutationError(t *testing.T) { }) defer tc.Stopper().Stop(ctx) ts := tc.Servers[0] - store, err := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) require.NoError(t, err) tc.SplitRangeOrFatal(t, splitKey) tc.AddVotersOrFatal(t, splitKey, tc.Target(1), tc.Target(2)) diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go index 97029880014..2de951e595b 100644 --- a/pkg/kv/kvserver/replicate_queue_test.go +++ 
b/pkg/kv/kvserver/replicate_queue_test.go @@ -121,7 +121,7 @@ func TestReplicateQueueRebalance(t *testing.T) { countReplicas := func() []int { counts := make([]int, len(tc.Servers)) for _, s := range tc.Servers { - err := s.Stores().VisitStores(func(s *kvserver.Store) error { + err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { counts[s.StoreID()-1] += s.ReplicaCount() return nil }) @@ -280,7 +280,7 @@ func TestReplicateQueueRebalanceMultiStore(t *testing.T) { countReplicas := func() (total int, perStore []int) { perStore = make([]int, numStores) for _, s := range tc.Servers { - err := s.Stores().VisitStores(func(s *kvserver.Store) error { + err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { require.Zero(t, perStore[s.StoreID()-1]) perStore[s.StoreID()-1] = s.ReplicaCount() total += s.ReplicaCount() @@ -293,7 +293,7 @@ func TestReplicateQueueRebalanceMultiStore(t *testing.T) { countLeases := func() (total int, perStore []int) { perStore = make([]int, numStores) for _, s := range tc.Servers { - err := s.Stores().VisitStores(func(s *kvserver.Store) error { + err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { c, err := s.Capacity(ctx, false) require.NoError(t, err) leases := int(c.LeaseCount) @@ -393,7 +393,7 @@ func TestReplicateQueueUpReplicateOddVoters(t *testing.T) { tc.AddAndStartServer(t, base.TestServerArgs{}) - if err := tc.Servers[0].Stores().VisitStores(func(s *kvserver.Store) error { + if err := tc.Servers[0].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { return s.ForceReplicationScanAndProcess() }); err != nil { t.Fatal(err) @@ -407,7 +407,7 @@ func TestReplicateQueueUpReplicateOddVoters(t *testing.T) { } var store *kvserver.Store - _ = tc.Servers[0].Stores().VisitStores(func(s *kvserver.Store) error { + _ = tc.Servers[0].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { store = s return nil }) @@ -484,7 +484,7 @@ func TestReplicateQueueDownReplicate(t *testing.T) { require.NoError(t, err) for _, s := range tc.Servers { - require.NoError(t, s.Stores().VisitStores(func(s *kvserver.Store) error { + require.NoError(t, s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { require.NoError(t, s.ForceReplicationScanAndProcess()) return nil })) @@ -513,7 +513,7 @@ func scanAndGetNumNonVoters( ) (numNonVoters int) { for _, s := range tc.Servers { // Nudge internal queues to up/down-replicate our scratch range. 
- require.NoError(t, s.Stores().VisitStores(func(s *kvserver.Store) error { + require.NoError(t, s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { require.NoError(t, s.ForceSplitScanAndProcess()) require.NoError(t, s.ForceReplicationScanAndProcess()) require.NoError(t, s.ForceRaftSnapshotQueueProcess()) @@ -1019,7 +1019,7 @@ func getLeaseholderStore( return nil, err } leaseHolderSrv := tc.Servers[leaseHolder.NodeID-1] - store, err := leaseHolderSrv.Stores().GetStore(leaseHolder.StoreID) + store, err := leaseHolderSrv.GetStores().(*kvserver.Stores).GetStore(leaseHolder.StoreID) if err != nil { return nil, err } @@ -1402,7 +1402,7 @@ func getAggregateMetricCounts( ) (currentCount int64, currentVoterCount int64) { for _, s := range tc.Servers { if storeId, exists := voterMap[s.NodeID()]; exists { - store, err := s.Stores().GetStore(storeId) + store, err := s.GetStores().(*kvserver.Stores).GetStore(storeId) if err != nil { log.Errorf(ctx, "error finding store: %s", err) continue @@ -1723,7 +1723,7 @@ func filterRangeLog( func toggleReplicationQueues(tc *testcluster.TestCluster, active bool) { for _, s := range tc.Servers { - _ = s.Stores().VisitStores(func(store *kvserver.Store) error { + _ = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(active) return nil }) @@ -1732,7 +1732,7 @@ func toggleReplicationQueues(tc *testcluster.TestCluster, active bool) { func forceScanOnAllReplicationQueues(tc *testcluster.TestCluster) (err error) { for _, s := range tc.Servers { - err = s.Stores().VisitStores(func(store *kvserver.Store) error { + err = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) } @@ -1741,7 +1741,7 @@ func forceScanOnAllReplicationQueues(tc *testcluster.TestCluster) (err error) { func toggleSplitQueues(tc *testcluster.TestCluster, active bool) { for _, s := range tc.Servers { - _ = s.Stores().VisitStores(func(store *kvserver.Store) error { + _ = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetSplitQueueActive(active) return nil }) @@ -1829,7 +1829,7 @@ func TestLargeUnsplittableRangeReplicate(t *testing.T) { forceProcess := func() { // Speed up the queue processing. 
for _, s := range tc.Servers { - err := s.Stores().VisitStores(func(store *kvserver.Store) error { + err := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) require.NoError(t, err) @@ -1954,7 +1954,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { rangeID, remoteNodeID, leaseHolderNodeID) leaseHolderSrv := tc.Servers[leaseHolderNodeID-1] leaseHolderStoreID := leaseHolderSrv.GetFirstStoreID() - leaseHolderStore, err := leaseHolderSrv.Stores().GetStore(leaseHolderStoreID) + leaseHolderStore, err := leaseHolderSrv.GetStores().(*kvserver.Stores).GetStore(leaseHolderStoreID) if err != nil { t.Fatal(err) } @@ -1962,7 +1962,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { // Start delaying Raft messages to the remote node remoteSrv := tc.Servers[remoteNodeID-1] remoteStoreID := remoteSrv.GetFirstStoreID() - remoteStore, err := remoteSrv.Stores().GetStore(remoteStoreID) + remoteStore, err := remoteSrv.GetStores().(*kvserver.Stores).GetStore(remoteStoreID) if err != nil { t.Fatal(err) } @@ -2053,7 +2053,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { return nil } currentSrv := tc.Servers[leaseBefore.Replica.NodeID-1] - leaseStore, err := currentSrv.Stores().GetStore(currentSrv.GetFirstStoreID()) + leaseStore, err := currentSrv.GetStores().(*kvserver.Stores).GetStore(currentSrv.GetFirstStoreID()) if err != nil { return err } @@ -2156,7 +2156,7 @@ func TestReplicateQueueAcquiresInvalidLeases(t *testing.T) { forceProcess := func() { // Speed up the queue processing. for _, s := range tc.Servers { - err := s.Stores().VisitStores(func(store *kvserver.Store) error { + err := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) require.NoError(t, err) @@ -2185,7 +2185,7 @@ func iterateOverAllStores( t *testing.T, tc *testcluster.TestCluster, f func(*kvserver.Store) error, ) { for _, server := range tc.Servers { - require.NoError(t, server.Stores().VisitStores(f)) + require.NoError(t, server.GetStores().(*kvserver.Stores).VisitStores(f)) } } diff --git a/pkg/kv/kvserver/replicate_test.go b/pkg/kv/kvserver/replicate_test.go index d600e2c507a..7db41d54ae3 100644 --- a/pkg/kv/kvserver/replicate_test.go +++ b/pkg/kv/kvserver/replicate_test.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -31,7 +30,7 @@ func TestEagerReplication(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ // Need to trick the server to think it's part of a cluster, otherwise it // will set the default zone config to require 1 replica and the split // bellow will not trigger a replication attempt. 
@@ -44,9 +43,8 @@ func TestEagerReplication(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Make sure everything goes through the replicate queue, so the start count diff --git a/pkg/kv/kvserver/ts_maintenance_queue_test.go b/pkg/kv/kvserver/ts_maintenance_queue_test.go index e6333507e41..96a95aaaeee 100644 --- a/pkg/kv/kvserver/ts_maintenance_queue_test.go +++ b/pkg/kv/kvserver/ts_maintenance_queue_test.go @@ -106,7 +106,7 @@ func TestTimeSeriesMaintenanceQueue(t *testing.T) { manual := hlc.NewHybridManualClock() ctx := context.Background() - serv := serverutils.StartServerOnly(t, base.TestServerArgs{ + s := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manual, @@ -119,9 +119,8 @@ func TestTimeSeriesMaintenanceQueue(t *testing.T) { }, }, }) - s := serv.(*server.TestServer) defer s.Stopper().Stop(ctx) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Generate several splits. The "c"-"zz" range is not going to be considered @@ -236,8 +235,7 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) { }, }) defer s.Stopper().Stop(context.Background()) - tsrv := s.(*server.TestServer) - tsdb := tsrv.TsDB() + tsdb := s.TsDB().(*ts.DB) // Populate time series data into the server. One time series, with one // datapoint at the current time and two datapoints older than the pruning @@ -245,7 +243,7 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) { // periods; this simplifies verification. seriesName := "test.metric" sourceName := "source1" - now := tsrv.Clock().PhysicalNow() + now := s.Clock().PhysicalNow() nearPast := now - (tsdb.PruneThreshold(ts.Resolution10s) * 2) farPast := now - (tsdb.PruneThreshold(ts.Resolution10s) * 4) sampleDuration := ts.Resolution10s.SampleDuration() @@ -339,7 +337,7 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) { // Force pruning. storeID := roachpb.StoreID(1) - store, err := tsrv.Stores().GetStore(roachpb.StoreID(1)) + store, err := s.GetStores().(*kvserver.Stores).GetStore(roachpb.StoreID(1)) if err != nil { t.Fatalf("error retrieving store %d: %+v", storeID, err) } diff --git a/pkg/kv/txn_external_test.go b/pkg/kv/txn_external_test.go index 65eee9d7f7f..9e00ac15de4 100644 --- a/pkg/kv/txn_external_test.go +++ b/pkg/kv/txn_external_test.go @@ -411,7 +411,7 @@ func testTxnNegotiateAndSendDoesNotBlock(t *testing.T, multiRange, strict, route // Reader goroutines: perform bounded-staleness reads that hit the server-side // negotiation fast-path. 
for _, s := range tc.Servers { - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) tracer := s.Tracer() g.Go(func() error { diff --git a/pkg/roachpb/BUILD.bazel b/pkg/roachpb/BUILD.bazel index 7a07e2462e0..548df18885e 100644 --- a/pkg/roachpb/BUILD.bazel +++ b/pkg/roachpb/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "data.go", "index_usage_stats.go", "internal.go", + "leaseinfo.go", "merge_spans.go", "metadata.go", "metadata_replicas.go", diff --git a/pkg/roachpb/leaseinfo.go b/pkg/roachpb/leaseinfo.go new file mode 100644 index 00000000000..b107f14cf4d --- /dev/null +++ b/pkg/roachpb/leaseinfo.go @@ -0,0 +1,49 @@ +// Copyright 2023 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package roachpb + +// LeaseInfo describes a range's current and potentially future lease. +type LeaseInfo struct { + cur, next Lease +} + +// MakeLeaseInfo creates a LeaseInfo with the given current and next leases. +func MakeLeaseInfo(cur, next Lease) LeaseInfo { + return LeaseInfo{cur: cur, next: next} +} + +// Current returns the range's current lease. +func (l LeaseInfo) Current() Lease { + return l.cur +} + +// CurrentOrProspective returns the range's potential next lease, if a lease +// request is in progress, or the current lease otherwise. +func (l LeaseInfo) CurrentOrProspective() Lease { + if !l.next.Empty() { + return l.next + } + return l.cur +} + +// LeaseInfoOpt enumerates options for GetRangeLease. +type LeaseInfoOpt int + +const ( + // AllowQueryToBeForwardedToDifferentNode specifies that, if the current node + // doesn't have a voter replica, the lease info can come from a different + // node. + AllowQueryToBeForwardedToDifferentNode LeaseInfoOpt = iota + // QueryLocalNodeOnly specifies that an error should be returned if the node + // is not able to serve the lease query (because it doesn't have a voting + // replica). 
+ QueryLocalNodeOnly +) diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index 3c08eb21bd9..356b57ec1c8 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -179,6 +179,7 @@ go_library( "//pkg/server/autoconfig/acprovider", "//pkg/server/debug", "//pkg/server/debug/pprofui", + "//pkg/server/decommissioning", "//pkg/server/diagnostics", "//pkg/server/diagnostics/diagnosticspb", "//pkg/server/goroutinedumper", @@ -446,6 +447,7 @@ go_test( "server_http_test.go", "server_import_ts_test.go", "server_internal_executor_factory_test.go", + "server_special_test.go", "server_startup_test.go", "server_systemlog_gc_test.go", "server_test.go", @@ -498,6 +500,7 @@ go_test( "//pkg/server/apiconstants", "//pkg/server/authserver", "//pkg/server/diagnostics", + "//pkg/server/privchecker", "//pkg/server/rangetestutils", "//pkg/server/serverpb", "//pkg/server/srvtestutils", @@ -510,6 +513,7 @@ go_test( "//pkg/sql", "//pkg/sql/appstatspb", "//pkg/sql/execinfrapb", + "//pkg/sql/isql", "//pkg/sql/roleoption", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", diff --git a/pkg/server/admin.go b/pkg/server/admin.go index ba0d7035df6..85b1b8f98b8 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -2089,7 +2089,7 @@ func (s *adminServer) Cluster( }, nil } -// Health returns whether this sql tenant is ready to receive +// Health returns whether this tenant server is ready to receive // traffic. // // See the docstring for HealthRequest for more details about @@ -2109,13 +2109,26 @@ func (s *adminServer) Health( return resp, nil } - if !s.sqlServer.isReady.Get() { - return nil, grpcstatus.Errorf(codes.Unavailable, "node is not accepting SQL clients") + if err := s.checkReadinessForHealthCheck(ctx); err != nil { + return nil, err } return resp, nil } +// checkReadinessForHealthCheck returns a gRPC error. +func (s *adminServer) checkReadinessForHealthCheck(ctx context.Context) error { + if err := s.grpc.health(ctx); err != nil { + return err + } + + if !s.sqlServer.isReady.Get() { + return grpcstatus.Errorf(codes.Unavailable, "node is not accepting SQL clients") + } + + return nil +} + // Health returns liveness for the node target of the request. // // See the docstring for HealthRequest for more details about @@ -2143,18 +2156,8 @@ func (s *systemAdminServer) Health( // checkReadinessForHealthCheck returns a gRPC error. func (s *systemAdminServer) checkReadinessForHealthCheck(ctx context.Context) error { - serveMode := s.grpc.mode.get() - switch serveMode { - case modeInitializing: - return grpcstatus.Error(codes.Unavailable, "node is waiting for cluster initialization") - case modeDraining: - // grpc.mode is set to modeDraining when the Drain(DrainMode_CLIENT) has - // been called (client connections are to be drained). - return grpcstatus.Errorf(codes.Unavailable, "node is shutting down") - case modeOperational: - break - default: - return srverrors.ServerError(ctx, errors.Newf("unknown mode: %v", serveMode)) + if err := s.grpc.health(ctx); err != nil { + return err } status := s.nodeLiveness.GetNodeVitalityFromCache(roachpb.NodeID(s.serverIterator.getID())) diff --git a/pkg/server/api_v2.go b/pkg/server/api_v2.go index 1d9b69fdd7f..601342ec636 100644 --- a/pkg/server/api_v2.go +++ b/pkg/server/api_v2.go @@ -347,6 +347,12 @@ func (a *apiV2Server) listSessions(w http.ResponseWriter, r *http.Request) { // "500": // description: Indicates unhealthy node. 
func (a *apiV2SystemServer) health(w http.ResponseWriter, r *http.Request) { + healthInternal(w, r, a.systemAdmin.checkReadinessForHealthCheck) +} + +func healthInternal( + w http.ResponseWriter, r *http.Request, checkReadinessForHealthCheck func(context.Context) error, +) { ready := false readyStr := r.URL.Query().Get("ready") if len(readyStr) > 0 { @@ -366,7 +372,7 @@ func (a *apiV2SystemServer) health(w http.ResponseWriter, r *http.Request) { return } - if err := a.systemAdmin.checkReadinessForHealthCheck(ctx); err != nil { + if err := checkReadinessForHealthCheck(ctx); err != nil { srverrors.APIV2InternalError(ctx, err, w) return } @@ -374,7 +380,7 @@ func (a *apiV2SystemServer) health(w http.ResponseWriter, r *http.Request) { } func (a *apiV2Server) health(w http.ResponseWriter, r *http.Request) { - apiutil.WriteJSONResponse(r.Context(), w, http.StatusNotImplemented, nil) + healthInternal(w, r, a.admin.checkReadinessForHealthCheck) } // swagger:operation GET /rules/ rules diff --git a/pkg/server/application_api/activity_test.go b/pkg/server/application_api/activity_test.go index 6f4519f0511..05f6ccce0b5 100644 --- a/pkg/server/application_api/activity_test.go +++ b/pkg/server/application_api/activity_test.go @@ -17,7 +17,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/apiconstants" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/srvtestutils" @@ -35,8 +34,7 @@ func TestListActivitySecurity(t *testing.T) { ctx := context.Background() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - ts := s.(*server.TestServer) - defer ts.Stopper().Stop(ctx) + defer s.Stopper().Stop(ctx) expectedErrNoPermission := "this operation requires the VIEWACTIVITY or VIEWACTIVITYREDACTED system privilege" contentionMsg := &serverpb.ListContentionEventsResponse{} @@ -108,7 +106,7 @@ func TestListActivitySecurity(t *testing.T) { } // gRPC requests behave as root and thus are always allowed. 
- client := ts.GetStatusClient(t) + client := s.GetStatusClient(t) { request := &serverpb.ListContentionEventsRequest{} if resp, err := client.ListLocalContentionEvents(ctx, request); err != nil || len(resp.Errors) > 0 { diff --git a/pkg/server/application_api/metrics_test.go b/pkg/server/application_api/metrics_test.go index 4961bf8717b..1a97e9f9e29 100644 --- a/pkg/server/application_api/metrics_test.go +++ b/pkg/server/application_api/metrics_test.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/apiconstants" "github.com/cockroachdb/cockroach/pkg/server/srvtestutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -31,10 +30,8 @@ import ( func TestMetricsMetadata(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer srv.Stopper().Stop(context.Background()) - - s := srv.(*server.TestServer) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(context.Background()) metricsMetadata := s.MetricsRecorder().GetMetricsMetadata() diff --git a/pkg/server/application_api/sessions_test.go b/pkg/server/application_api/sessions_test.go index 171b04fe082..739cc8a4401 100644 --- a/pkg/server/application_api/sessions_test.go +++ b/pkg/server/application_api/sessions_test.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/apiconstants" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/srvtestutils" @@ -42,8 +41,7 @@ func TestListSessionsSecurity(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - ts := s.(*server.TestServer) - defer ts.Stopper().Stop(context.Background()) + defer s.Stopper().Stop(context.Background()) ctx := context.Background() @@ -70,7 +68,7 @@ func TestListSessionsSecurity(t *testing.T) { } for _, tc := range testCases { var response serverpb.ListSessionsResponse - err := srvtestutils.GetStatusJSONProtoWithAdminOption(ts, tc.endpoint, &response, requestWithAdmin) + err := srvtestutils.GetStatusJSONProtoWithAdminOption(s, tc.endpoint, &response, requestWithAdmin) if tc.expectedErr == "" { if err != nil || len(response.Errors) > 0 { t.Errorf("unexpected failure listing sessions from %s; error: %v; response errors: %v", @@ -92,7 +90,7 @@ func TestListSessionsSecurity(t *testing.T) { } // gRPC requests behave as root and thus are always allowed. 
- client := ts.GetStatusClient(t) + client := s.GetStatusClient(t) for _, user := range []string{"", apiconstants.TestingUser, username.RootUser} { request := &serverpb.ListSessionsRequest{Username: user} diff --git a/pkg/server/application_api/storage_inspection_test.go b/pkg/server/application_api/storage_inspection_test.go index 1ec62453d09..2585b41a9b6 100644 --- a/pkg/server/application_api/storage_inspection_test.go +++ b/pkg/server/application_api/storage_inspection_test.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/apiconstants" "github.com/cockroachdb/cockroach/pkg/server/rangetestutils" "github.com/cockroachdb/cockroach/pkg/server/serverpb" @@ -453,7 +452,6 @@ func TestSpanStatsGRPCResponse(t *testing.T) { ctx := context.Background() s := serverutils.StartServerOnly(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - ts := s.(*server.TestServer) span := roachpb.Span{ Key: roachpb.RKeyMin.AsRawKey(), @@ -464,13 +462,13 @@ func TestSpanStatsGRPCResponse(t *testing.T) { Spans: []roachpb.Span{span}, } - client := ts.GetStatusClient(t) + client := s.GetStatusClient(t) response, err := client.SpanStats(ctx, &request) if err != nil { t.Fatal(err) } - initialRanges, err := ts.ExpectedInitialRangeCount() + initialRanges, err := s.ExpectedInitialRangeCount() if err != nil { t.Fatal(err) } diff --git a/pkg/server/application_api/zcfg_test.go b/pkg/server/application_api/zcfg_test.go index ac1b5ef5969..794a3d9a0b3 100644 --- a/pkg/server/application_api/zcfg_test.go +++ b/pkg/server/application_api/zcfg_test.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/srvtestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -39,10 +38,9 @@ func TestAdminAPIZoneDetails(t *testing.T) { DefaultTestTenant: base.TODOTestTenantDisabled, }) defer s.Stopper().Stop(context.Background()) - ts := s.(*server.TestServer) // Create database and table. - ac := ts.AmbientCtx() + ac := s.AmbientCtx() ctx, span := ac.AnnotateCtxWithSpan(context.Background(), "test") defer span.Finish() setupQueries := []string{ @@ -108,14 +106,14 @@ func TestAdminAPIZoneDetails(t *testing.T) { } // Verify zone matches cluster default. 
- verifyDbZone(s.(*server.TestServer).Cfg.DefaultZoneConfig, serverpb.ZoneConfigurationLevel_CLUSTER) - verifyTblZone(s.(*server.TestServer).Cfg.DefaultZoneConfig, serverpb.ZoneConfigurationLevel_CLUSTER) + verifyDbZone(s.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER) + verifyTblZone(s.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER) - databaseID, err := ts.QueryDatabaseID(ctx, username.RootUserName(), "test") + databaseID, err := s.QueryDatabaseID(ctx, username.RootUserName(), "test") if err != nil { t.Fatal(err) } - tableID, err := ts.QueryTableID(ctx, username.RootUserName(), "test", "tbl") + tableID, err := s.QueryTableID(ctx, username.RootUserName(), "test", "tbl") if err != nil { t.Fatal(err) } diff --git a/pkg/server/authserver/authentication_test.go b/pkg/server/authserver/authentication_test.go index f01ed95de09..6361d79125b 100644 --- a/pkg/server/authserver/authentication_test.go +++ b/pkg/server/authserver/authentication_test.go @@ -35,7 +35,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/apiconstants" "github.com/cockroachdb/cockroach/pkg/server/authserver" "github.com/cockroachdb/cockroach/pkg/server/debug" @@ -176,7 +175,7 @@ func TestSSLEnforcement(t *testing.T) { } url := url.URL{ Scheme: tc.ctx.HTTPRequestScheme(), - Host: s.(*server.TestServer).Cfg.HTTPAddr, + Host: s.HTTPAddr(), Path: tc.path, } resp, err := client.Get(url.String()) diff --git a/pkg/server/connectivity_test.go b/pkg/server/connectivity_test.go index 8f8d777c9b7..bd7cfeaa4e4 100644 --- a/pkg/server/connectivity_test.go +++ b/pkg/server/connectivity_test.go @@ -343,7 +343,7 @@ func TestJoinVersionGate(t *testing.T) { if err != nil { t.Fatal(err) } - defer serv.Stop() + defer serv.Stop(context.Background()) ctx := context.Background() if err := serv.Start(ctx); !errors.Is(errors.Cause(err), server.ErrIncompatibleBinaryVersion) { diff --git a/pkg/server/decommission.go b/pkg/server/decommission.go index f4ce6a4fecb..857a4cce205 100644 --- a/pkg/server/decommission.go +++ b/pkg/server/decommission.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server/decommissioning" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" @@ -36,37 +37,18 @@ import ( grpcstatus "google.golang.org/grpc/status" ) -// DecommissioningNodeMap tracks the set of nodes that we know are +// decommissioningNodeMap tracks the set of nodes that we know are // decommissioning. This map is used to inform whether we need to proactively // enqueue some decommissioning node's ranges for rebalancing. -type DecommissioningNodeMap struct { +type decommissioningNodeMap struct { syncutil.RWMutex nodes map[roachpb.NodeID]interface{} } -// DecommissionRangeCheckResult is the result of evaluating the allocator action -// and target for a single range that has an extant replica on a node targeted -// for decommission. 
-type DecommissionRangeCheckResult struct { - Desc roachpb.RangeDescriptor - Action string - TracingSpans tracingpb.Recording - Err error -} - -// DecommissionPreCheckResult is the result of checking the readiness -// of a node or set of nodes to be decommissioned. -type DecommissionPreCheckResult struct { - RangesChecked int - ReplicasByNode map[roachpb.NodeID][]roachpb.ReplicaIdent - ActionCounts map[string]int - RangesNotReady []DecommissionRangeCheckResult -} - // makeOnNodeDecommissioningCallback returns a callback that enqueues the // decommissioning node's ranges into the `stores`' replicateQueues for // rebalancing. -func (t *DecommissioningNodeMap) makeOnNodeDecommissioningCallback( +func (t *decommissioningNodeMap) makeOnNodeDecommissioningCallback( stores *kvserver.Stores, ) func(id roachpb.NodeID) { return func(decommissioningNodeID roachpb.NodeID) { @@ -125,7 +107,7 @@ func (t *DecommissioningNodeMap) makeOnNodeDecommissioningCallback( } } -func (t *DecommissioningNodeMap) onNodeDecommissioned(nodeID roachpb.NodeID) { +func (t *decommissioningNodeMap) onNodeDecommissioned(nodeID roachpb.NodeID) { t.Lock() defer t.Unlock() // NB: We may have already deleted this node, but that's ok. @@ -165,11 +147,11 @@ func (s *Server) DecommissionPreCheck( strictReadiness bool, collectTraces bool, maxErrors int, -) (DecommissionPreCheckResult, error) { +) (decommissioning.PreCheckResult, error) { // Ensure that if collectTraces is enabled, that a maxErrors >0 is set in // order to avoid unlimited memory usage. if collectTraces && maxErrors <= 0 { - return DecommissionPreCheckResult{}, + return decommissioning.PreCheckResult{}, grpcstatus.Error(codes.InvalidArgument, "MaxErrors must be set to collect traces.") } @@ -177,7 +159,7 @@ func (s *Server) DecommissionPreCheck( decommissionCheckNodeIDs := make(map[roachpb.NodeID]livenesspb.NodeLivenessStatus) replicasByNode := make(map[roachpb.NodeID][]roachpb.ReplicaIdent) actionCounts := make(map[string]int) - var rangeErrors []DecommissionRangeCheckResult + var rangeErrors []decommissioning.RangeCheckResult const pageSize = 10000 for _, nodeID := range nodeIDs { @@ -210,7 +192,7 @@ func (s *Server) DecommissionPreCheck( err = errors.Errorf("n%d has no initialized store", s.NodeID()) } if err != nil { - return DecommissionPreCheckResult{}, grpcstatus.Error(codes.NotFound, err.Error()) + return decommissioning.PreCheckResult{}, grpcstatus.Error(codes.NotFound, err.Error()) } // Define our node liveness overrides to simulate that the nodeIDs for which @@ -274,10 +256,10 @@ func (s *Server) DecommissionPreCheck( }) if err != nil { - return DecommissionPreCheckResult{}, grpcstatus.Errorf(codes.Internal, err.Error()) + return decommissioning.PreCheckResult{}, grpcstatus.Errorf(codes.Internal, err.Error()) } - return DecommissionPreCheckResult{ + return decommissioning.PreCheckResult{ RangesChecked: rangesChecked, ReplicasByNode: replicasByNode, ActionCounts: actionCounts, @@ -295,8 +277,8 @@ func evaluateRangeCheckResult( action allocatorimpl.AllocatorAction, recording tracingpb.Recording, rErr error, -) (passed bool, _ DecommissionRangeCheckResult) { - checkResult := DecommissionRangeCheckResult{ +) (passed bool, _ decommissioning.RangeCheckResult) { + checkResult := decommissioning.RangeCheckResult{ Desc: *desc, Action: action.String(), Err: rErr, diff --git a/pkg/server/decommissioning/BUILD.bazel b/pkg/server/decommissioning/BUILD.bazel new file mode 100644 index 00000000000..81cf95f4a53 --- /dev/null +++ b/pkg/server/decommissioning/BUILD.bazel @@ 
-0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "decommissioning", + srcs = ["decommissioning.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/server/decommissioning", + visibility = ["//visibility:public"], + deps = [ + "//pkg/roachpb", + "//pkg/util/tracing/tracingpb", + ], +) diff --git a/pkg/server/decommissioning/decommissioning.go b/pkg/server/decommissioning/decommissioning.go new file mode 100644 index 00000000000..f514bf9f2d4 --- /dev/null +++ b/pkg/server/decommissioning/decommissioning.go @@ -0,0 +1,35 @@ +// Copyright 2023 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package decommissioning + +import ( + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb" +) + +// RangeCheckResult is the result of evaluating the allocator action +// and target for a single range that has an extant replica on a node targeted +// for decommission. +type RangeCheckResult struct { + Desc roachpb.RangeDescriptor + Action string + TracingSpans tracingpb.Recording + Err error +} + +// PreCheckResult is the result of checking the readiness +// of a node or set of nodes to be decommissioned. +type PreCheckResult struct { + RangesChecked int + ReplicasByNode map[roachpb.NodeID][]roachpb.ReplicaIdent + ActionCounts map[string]int + RangesNotReady []RangeCheckResult +} diff --git a/pkg/server/diagnostics/update_checker_test.go b/pkg/server/diagnostics/update_checker_test.go index 65ee00a76bb..6a17173bcd6 100644 --- a/pkg/server/diagnostics/update_checker_test.go +++ b/pkg/server/diagnostics/update_checker_test.go @@ -55,7 +55,7 @@ func TestCheckVersion(t *testing.T) { require.Equal(t, 1, r.NumRequests()) last := r.LastRequestData() - require.Equal(t, s.(*server.TestServer).StorageClusterID().String(), last.UUID) + require.Equal(t, s.StorageLayer().StorageClusterID().String(), last.UUID) require.Equal(t, "system", last.TenantID) require.Equal(t, build.GetInfo().Tag, last.Version) require.Equal(t, "OSS", last.LicenseType) diff --git a/pkg/server/grpc_server.go b/pkg/server/grpc_server.go index 8e5e9c1545c..c9029231cea 100644 --- a/pkg/server/grpc_server.go +++ b/pkg/server/grpc_server.go @@ -11,9 +11,12 @@ package server import ( + "context" "sync/atomic" "github.com/cockroachdb/cockroach/pkg/rpc" + "github.com/cockroachdb/cockroach/pkg/server/srverrors" + "github.com/cockroachdb/errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" @@ -68,6 +71,22 @@ func (s *grpcServer) operational() bool { return sMode == modeOperational || sMode == modeDraining } +func (s *grpcServer) health(ctx context.Context) error { + sm := s.mode.get() + switch sm { + case modeInitializing: + return grpcstatus.Error(codes.Unavailable, "node is waiting for cluster initialization") + case modeDraining: + // grpc.mode is set to modeDraining when the Drain(DrainMode_CLIENT) has + // been called (client connections are to be drained). 
+ return grpcstatus.Errorf(codes.Unavailable, "node is shutting down") + case modeOperational: + return nil + default: + return srverrors.ServerError(ctx, errors.Newf("unknown mode: %v", sm)) + } +} + var rpcsAllowedWhileBootstrapping = map[string]struct{}{ "/cockroach.rpc.Heartbeat/Ping": {}, "/cockroach.gossip.Gossip/Gossip": {}, diff --git a/pkg/server/node_test.go b/pkg/server/node_test.go index 4926315e094..e63f9419a89 100644 --- a/pkg/server/node_test.go +++ b/pkg/server/node_test.go @@ -290,14 +290,17 @@ func TestCorruptedClusterID(t *testing.T) { // And that UpdatedAt has increased. // The latest actual stats are returned. func compareNodeStatus( - t *testing.T, ts *TestServer, expectedNodeStatus *statuspb.NodeStatus, testNumber int, + t *testing.T, + ts serverutils.TestServerInterface, + expectedNodeStatus *statuspb.NodeStatus, + testNumber int, ) *statuspb.NodeStatus { // ======================================== // Read NodeStatus from server and validate top-level fields. // ======================================== - nodeStatusKey := keys.NodeStatusKey(ts.node.Descriptor.NodeID) + nodeStatusKey := keys.NodeStatusKey(ts.NodeID()) nodeStatus := &statuspb.NodeStatus{} - if err := ts.db.GetProto(context.Background(), nodeStatusKey, nodeStatus); err != nil { + if err := ts.DB().GetProto(context.Background(), nodeStatusKey, nodeStatus); err != nil { t.Fatalf("%d: failure getting node status: %s", testNumber, err) } @@ -409,15 +412,15 @@ func TestNodeStatusWritten(t *testing.T) { // ======================================== // Start test server and wait for full initialization. // ======================================== - srv, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{ - DisableEventLog: true, + ts, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{ + DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, + DisableEventLog: true, }) - defer srv.Stopper().Stop(context.Background()) - ts := srv.(*TestServer) + defer ts.Stopper().Stop(context.Background()) ctx := context.Background() // Retrieve the first store from the Node. - s, err := ts.node.stores.GetStore(roachpb.StoreID(1)) + s, err := ts.GetStores().(*kvserver.Stores).GetStore(roachpb.StoreID(1)) if err != nil { t.Fatal(err) } @@ -433,7 +436,9 @@ func TestNodeStatusWritten(t *testing.T) { } // Wait for full replication of initial ranges. - initialRanges, err := ExpectedInitialRangeCount(keys.SystemSQLCodec, &ts.cfg.DefaultZoneConfig, &ts.cfg.DefaultSystemZoneConfig) + zcfg := ts.DefaultZoneConfig() + szcfg := ts.DefaultSystemZoneConfig() + initialRanges, err := ExpectedInitialRangeCount(keys.SystemSQLCodec, &zcfg, &szcfg) if err != nil { t.Fatal(err) } @@ -451,7 +456,7 @@ func TestNodeStatusWritten(t *testing.T) { // status produced by the server. // ======================================== expectedNodeStatus := &statuspb.NodeStatus{ - Desc: ts.node.Descriptor, + Desc: ts.Node().(*Node).Descriptor, StartedAt: 0, UpdatedAt: 0, Metrics: map[string]float64{ @@ -461,7 +466,7 @@ func TestNodeStatusWritten(t *testing.T) { } expectedStoreStatuses := make(map[roachpb.StoreID]statuspb.StoreStatus) - if err := ts.node.stores.VisitStores(func(s *kvserver.Store) error { + if err := ts.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { desc, err := s.Descriptor(ctx, false /* useCached */) if err != nil { t.Fatal(err) @@ -496,7 +501,7 @@ func TestNodeStatusWritten(t *testing.T) { // were multiple replicas, more care would need to be taken in the initial // syncFeed(). 
forceWriteStatus := func() { - if err := ts.node.computeMetricsPeriodically(ctx, map[*kvserver.Store]*storage.MetricsForInterval{}, 0); err != nil { + if err := ts.Node().(*Node).computeMetricsPeriodically(ctx, map[*kvserver.Store]*storage.MetricsForInterval{}, 0); err != nil { t.Fatalf("error publishing store statuses: %s", err) } @@ -520,10 +525,10 @@ func TestNodeStatusWritten(t *testing.T) { rightKey := "c" // Write some values left and right of the proposed split key. - if err := ts.db.Put(ctx, leftKey, content); err != nil { + if err := kvDB.Put(ctx, leftKey, content); err != nil { t.Fatal(err) } - if err := ts.db.Put(ctx, rightKey, content); err != nil { + if err := kvDB.Put(ctx, rightKey, content); err != nil { t.Fatal(err) } @@ -550,7 +555,7 @@ func TestNodeStatusWritten(t *testing.T) { // ======================================== // Split the range. - if err := ts.db.AdminSplit( + if err := kvDB.AdminSplit( context.Background(), splitKey, hlc.MaxTimestamp, /* expirationTime */ @@ -560,10 +565,10 @@ func TestNodeStatusWritten(t *testing.T) { // Write on both sides of the split to ensure that the raft machinery // is running. - if err := ts.db.Put(ctx, leftKey, content); err != nil { + if err := kvDB.Put(ctx, leftKey, content); err != nil { t.Fatal(err) } - if err := ts.db.Put(ctx, rightKey, content); err != nil { + if err := kvDB.Put(ctx, rightKey, content); err != nil { t.Fatal(err) } @@ -669,7 +674,7 @@ func TestNodeBatchRequestPProfLabels(t *testing.T) { defer log.Scope(t).Close(t) observedProfileLabels := make(map[string]string) - srv := serverutils.StartServerOnly(t, base.TestServerArgs{ + ts := serverutils.StartServerOnly(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ TestingResponseFilter: func(ctx context.Context, ba *kvpb.BatchRequest, _ *kvpb.BatchResponse) *kvpb.Error { @@ -693,9 +698,8 @@ func TestNodeBatchRequestPProfLabels(t *testing.T) { }, }, }) - defer srv.Stopper().Stop(context.Background()) - ts := srv.(*TestServer) - n := ts.GetNode() + defer ts.Stopper().Stop(context.Background()) + n := ts.Node().(*Node) var ba kvpb.BatchRequest ba.RangeID = 1 @@ -732,11 +736,10 @@ func TestNodeBatchRequestMetricsInc(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer srv.Stopper().Stop(context.Background()) - ts := srv.(*TestServer) + ts := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer ts.Stopper().Stop(context.Background()) - n := ts.GetNode() + n := ts.Node().(*Node) bCurr := n.metrics.BatchCount.Count() getCurr := n.metrics.MethodCounts[kvpb.Get].Count() putCurr := n.metrics.MethodCounts[kvpb.Put].Count() diff --git a/pkg/server/purge_auth_session_test.go b/pkg/server/purge_auth_session_test.go index 30a68e82ede..aa82fc44185 100644 --- a/pkg/server/purge_auth_session_test.go +++ b/pkg/server/purge_auth_session_test.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/authserver" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -30,10 +31,9 @@ func TestPurgeSession(t *testing.T) { ctx := context.Background() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) - ts := s.(*TestServer) + ts := 
s.ApplicationLayer() userName := username.TestUserName() if err := ts.CreateAuthUser(userName, false /* isAdmin */); err != nil { t.Fatal(err) @@ -49,7 +49,7 @@ func TestPurgeSession(t *testing.T) { t.Fatal(err) } - settingsValues := &ts.st.SV + settingsValues := &ts.ClusterSettings().SV var ( purgeTTL = webSessionPurgeTTL.Get(settingsValues) ) @@ -62,7 +62,7 @@ VALUES($1, $2, $3, $4, (SELECT user_id FROM system.users WHERE username = $2)) // Each iteration of the loop inserts a session, rewinding the age of // the given timestamp column with each iteration. insertOldSessions := func(column string) { - currTime := ts.clock.PhysicalTime() + currTime := ts.Clock().PhysicalTime() // Initialize each timestamp column at the current time. expiresAt, revokedAt := currTime, currTime @@ -85,7 +85,7 @@ VALUES($1, $2, $3, $4, (SELECT user_id FROM system.users WHERE username = $2)) durationSinceRevocation := purgeTTL + margin revokedAt = revokedAt.Add(durationSinceRevocation * time.Duration(-1)) } - if _, err = ts.sqlServer.internalExecutor.QueryRowEx( + if _, err = ts.InternalExecutor().(isql.Executor).QueryRowEx( ctx, "add-session", nil, /* txn */ @@ -111,7 +111,7 @@ VALUES($1, $2, $3, $4, (SELECT user_id FROM system.users WHERE username = $2)) purgeOldSessions := func() { systemLogsToGC := getTablesToGC() - runSystemLogGC(ctx, ts.sqlServer, ts.Cfg.Settings, systemLogsToGC) + runSystemLogGC(ctx, ts.SQLServerInternal().(*SQLServer), ts.ClusterSettings(), systemLogsToGC) } // Check deletion for old expired sessions. diff --git a/pkg/server/server.go b/pkg/server/server.go index 7147690251e..348f9060b48 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -163,7 +163,7 @@ type Server struct { admin *systemAdminServer status *systemStatusServer drain *drainServer - decomNodeMap *DecommissioningNodeMap + decomNodeMap *decommissioningNodeMap authentication authserver.Server migrationServer *migrationServer tsDB *ts.DB @@ -486,7 +486,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { stores := kvserver.NewStores(cfg.AmbientCtx, clock) - decomNodeMap := &DecommissioningNodeMap{ + decomNodeMap := &decommissioningNodeMap{ nodes: make(map[roachpb.NodeID]interface{}), } nodeLiveness := liveness.NewNodeLiveness(liveness.NodeLivenessOptions{ @@ -2229,15 +2229,6 @@ func (s *Server) AcceptInternalClients(ctx context.Context) error { }) } -// Stop shuts down this server instance. Note that this method exists -// solely for the benefit of the `\demo shutdown` command in -// `cockroach demo`. It is not called as part of the regular server -// shutdown sequence; for this, see cli/start.go and the Drain() -// RPC. -func (s *Server) Stop() { - s.stopper.Stop(context.Background()) -} - // ShutdownRequested returns a channel that is signaled when a subsystem wants // the server to be shut down. 
func (s *Server) ShutdownRequested() <-chan ShutdownRequest { diff --git a/pkg/server/server_internal_executor_factory_test.go b/pkg/server/server_internal_executor_factory_test.go index 01750eefc0c..1ec2fefc348 100644 --- a/pkg/server/server_internal_executor_factory_test.go +++ b/pkg/server/server_internal_executor_factory_test.go @@ -28,10 +28,11 @@ func TestInternalExecutorClearsMonitorMemory(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() - mon := s.(*TestServer).sqlServer.internalDBMemMonitor + mon := s.SQLServerInternal().(*SQLServer).internalDBMemMonitor ief := s.ExecutorConfig().(sql.ExecutorConfig).InternalDB sessionData := sql.NewInternalSessionData(ctx, s.ClusterSettings(), "TestInternalExecutorClearsMonitorMemory") ie := ief.NewInternalExecutor(sessionData) @@ -40,6 +41,6 @@ func TestInternalExecutorClearsMonitorMemory(t *testing.T) { require.Greater(t, mon.AllocBytes(), int64(0)) err = rows.Close() require.NoError(t, err) - s.Stopper().Stop(ctx) + srv.Stopper().Stop(ctx) require.Equal(t, mon.AllocBytes(), int64(0)) } diff --git a/pkg/server/server_special_test.go b/pkg/server/server_special_test.go new file mode 100644 index 00000000000..e3a1b484a03 --- /dev/null +++ b/pkg/server/server_special_test.go @@ -0,0 +1,138 @@ +// Copyright 2023 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package server + +import ( + "context" + "math" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/netutil/addr" + pgx "github.com/jackc/pgx/v4" + "github.com/stretchr/testify/require" +) + +// Tests in this file have a linter exception against casting to *TestServer. + +func TestPanicRecovery(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.ScopeWithoutShowLogs(t).Close(t) + + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(context.Background()) + ts := s.(*TestServer) + + // Enable a test-only endpoint that induces a panic. + ts.http.mux.Handle("/panic", http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + panic("induced panic for testing") + })) + + // Create a request. + req, err := http.NewRequest(http.MethodGet, ts.AdminURL().WithPath("/panic").String(), nil /* body */) + require.NoError(t, err) + + // Create a ResponseRecorder to record the response. + rr := httptest.NewRecorder() + require.NotPanics(t, func() { + ts.http.baseHandler(rr, req) + }) + + // Check that the status code is correct. + require.Equal(t, http.StatusInternalServerError, rr.Code) + + // Check that the panic has been reported. 
+ entries, err := log.FetchEntriesFromFiles( + 0, /* startTimestamp */ + math.MaxInt64, + 10000, /* maxEntries */ + regexp.MustCompile("a panic has occurred!"), + log.WithMarkedSensitiveData) + require.NoError(t, err) + require.NotEmpty(t, entries, "no log entries matching the regexp") +} + +// TestSocketAutoNumbering checks that a socket name +// ending with `.0` in the input config gets auto-assigned +// the actual TCP port number. +func TestSocketAutoNumbering(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + socketName := "foo.0" + // We need a temp directory in which we'll create the unix socket. + // On BSD, binding to a socket is limited to a path length of 104 characters + // (including the NUL terminator). In glibc, this limit is 108 characters. + // macOS has a tendency to produce very long temporary directory names, so + // we are careful to keep all the constants involved short. + baseTmpDir := os.TempDir() + if len(baseTmpDir) >= 104-1-len(socketName)-1-4-len("TestSocketAutoNumbering")-10 { + t.Logf("temp dir name too long: %s", baseTmpDir) + t.Logf("using /tmp instead.") + // Note: /tmp might fail in some systems, that's why we still prefer + // os.TempDir() if available. + baseTmpDir = "/tmp" + } + tempDir, err := os.MkdirTemp(baseTmpDir, "TestSocketAutoNumbering") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tempDir) }() + + socketFile := filepath.Join(tempDir, socketName) + + ctx := context.Background() + + params := base.TestServerArgs{ + Insecure: true, + SocketFile: socketFile, + } + s := serverutils.StartServerOnly(t, params) + defer s.Stopper().Stop(ctx) + + _, expectedPort, err := addr.SplitHostPort(s.SQLAddr(), "") + require.NoError(t, err) + + if socketPath := s.(*TestServer).Cfg.SocketFile; !strings.HasSuffix(socketPath, "."+expectedPort) { + t.Errorf("expected unix socket ending with port %q, got %q", expectedPort, socketPath) + } +} + +// Test that connections using the internal SQL loopback listener work. +func TestInternalSQL(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + + conf, err := pgx.ParseConfig("") + require.NoError(t, err) + conf.User = "root" + // Configure pgx to connect on the loopback listener. + conf.DialFunc = func(ctx context.Context, network, addr string) (net.Conn, error) { + return s.(*TestServer).Server.loopbackPgL.Connect(ctx) + } + conn, err := pgx.ConnectConfig(ctx, conf) + require.NoError(t, err) + // Run a random query to check that it all works. 
+ r := conn.QueryRow(ctx, "SELECT count(*) FROM system.sqlliveness") + var count int + require.NoError(t, r.Scan(&count)) +} diff --git a/pkg/server/server_systemlog_gc_test.go b/pkg/server/server_systemlog_gc_test.go index 1835378bf54..9cd5c40d0cc 100644 --- a/pkg/server/server_systemlog_gc_test.go +++ b/pkg/server/server_systemlog_gc_test.go @@ -34,11 +34,13 @@ func TestLogGC(t *testing.T) { defer log.Scope(t).Close(t) skip.UnderRace(t, "takes >1 min under race") - a := assert.New(t) - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - ts := s.(*TestServer) ctx := context.Background() - defer s.Stopper().Stop(ctx) + a := assert.New(t) + ts, db, _ := serverutils.StartServer(t, base.TestServerArgs{ + DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, + }) + defer ts.Stopper().Stop(ctx) + const testRangeID = 10001 const table = "rangelog" @@ -93,7 +95,7 @@ func TestLogGC(t *testing.T) { gc := func(ctx context.Context, table string, tsLow, tsHigh time.Time) (time.Time, int64, error) { return gcSystemLog(ctx, - ts.sqlServer, + ts.SQLServerInternal().(*SQLServer), "test", table, "timestamp", tsLow, tsHigh, 1000) } diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index 4c2c77a8c3d..9f8aaf3303e 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -16,15 +16,12 @@ import ( "context" "fmt" "io" - "math" - "net" "net/http" "net/http/httptest" "net/url" "os" "path/filepath" "reflect" - "regexp" "strings" "testing" "testing/fstest" @@ -55,7 +52,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/httputil" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/cockroach/pkg/util/netutil/addr" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" @@ -63,7 +59,6 @@ import ( "github.com/gogo/protobuf/jsonpb" "github.com/gogo/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" - pgx "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" @@ -84,43 +79,6 @@ func TestSelfBootstrap(t *testing.T) { } } -func TestPanicRecovery(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.ScopeWithoutShowLogs(t).Close(t) - - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - ts := s.(*TestServer) - - // Enable a test-only endpoint that induces a panic. - ts.http.mux.Handle("/panic", http.HandlerFunc(func(http.ResponseWriter, *http.Request) { - panic("induced panic for testing") - })) - - // Create a request. - req, err := http.NewRequest(http.MethodGet, ts.AdminURL().WithPath("/panic").String(), nil /* body */) - require.NoError(t, err) - - // Create a ResponseRecorder to record the response. - rr := httptest.NewRecorder() - require.NotPanics(t, func() { - ts.http.baseHandler(rr, req) - }) - - // Check that the status code is correct. - require.Equal(t, http.StatusInternalServerError, rr.Code) - - // Check that the panic has been reported. - entries, err := log.FetchEntriesFromFiles( - 0, /* startTimestamp */ - math.MaxInt64, - 10000, /* maxEntries */ - regexp.MustCompile("a panic has occurred!"), - log.WithMarkedSensitiveData) - require.NoError(t, err) - require.NotEmpty(t, entries, "no log entries matching the regexp") -} - // TestHealthCheck runs a basic sanity check on the health checker. 
func TestHealthCheck(t *testing.T) { defer leaktest.AfterTest(t)() @@ -139,7 +97,7 @@ func TestHealthCheck(t *testing.T) { ctx := context.Background() - recorder := s.(*TestServer).Server.recorder + recorder := s.MetricsRecorder() { summary := *recorder.GenerateNodeStatus(ctx) @@ -149,7 +107,7 @@ func TestHealthCheck(t *testing.T) { } } - store, err := s.(*TestServer).Server.node.stores.GetStore(1) + store, err := s.GetStores().(*kvserver.Stores).GetStore(1) if err != nil { t.Fatal(err) } @@ -286,11 +244,11 @@ func TestPlainHTTPServer(t *testing.T) { func TestSecureHTTPRedirect(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - ts := s.(*TestServer) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(context.Background()) + ts := srv.ApplicationLayer() - httpClient, err := s.GetUnauthenticatedHTTPClient() + httpClient, err := ts.GetUnauthenticatedHTTPClient() if err != nil { t.Fatal(err) } @@ -299,8 +257,8 @@ func TestSecureHTTPRedirect(t *testing.T) { return http.ErrUseLastResponse } - origURL := "http://" + ts.Cfg.HTTPAddr - expURL := url.URL{Scheme: "https", Host: ts.Cfg.HTTPAddr, Path: "/"} + origURL := "http://" + ts.HTTPAddr() + expURL := url.URL{Scheme: "https", Host: ts.HTTPAddr(), Path: "/"} if resp, err := httpClient.Get(origURL); err != nil { t.Fatal(err) @@ -746,13 +704,13 @@ func TestServeIndexHTML(t *testing.T) { } t.Run("Insecure mode", func(t *testing.T) { - s := serverutils.StartServerOnly(t, base.TestServerArgs{ + srv := serverutils.StartServerOnly(t, base.TestServerArgs{ Insecure: true, }) - defer s.Stopper().Stop(ctx) - tsrv := s.(*TestServer) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() - client, err := tsrv.GetUnauthenticatedHTTPClient() + client, err := s.GetUnauthenticatedHTTPClient() require.NoError(t, err) t.Run("short build", func(t *testing.T) { @@ -813,13 +771,13 @@ Binary built without web UI. t.Run("Secure mode", func(t *testing.T) { linkInFakeUI() defer unlinkFakeUI() - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) - tsrv := s.(*TestServer) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() - loggedInClient, err := tsrv.GetAdminHTTPClient() + loggedInClient, err := s.GetAdminHTTPClient() require.NoError(t, err) - loggedOutClient, err := tsrv.GetUnauthenticatedHTTPClient() + loggedOutClient, err := s.GetUnauthenticatedHTTPClient() require.NoError(t, err) cases := []struct { @@ -896,13 +854,13 @@ Binary built without web UI. ui.Assets = nil }() - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) - tsrv := s.(*TestServer) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() - loggedInClient, err := tsrv.GetAdminHTTPClient() + loggedInClient, err := s.GetAdminHTTPClient() require.NoError(t, err) - loggedOutClient, err := tsrv.GetUnauthenticatedHTTPClient() + loggedOutClient, err := s.GetUnauthenticatedHTTPClient() require.NoError(t, err) cases := []struct { @@ -1163,70 +1121,3 @@ func Test_makeFakeNodeStatuses(t *testing.T) { }) } } - -// TestSocketAutoNumbering checks that a socket name -// ending with `.0` in the input config gets auto-assigned -// the actual TCP port number. 
-func TestSocketAutoNumbering(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - socketName := "foo.0" - // We need a temp directory in which we'll create the unix socket. - // On BSD, binding to a socket is limited to a path length of 104 characters - // (including the NUL terminator). In glibc, this limit is 108 characters. - // macOS has a tendency to produce very long temporary directory names, so - // we are careful to keep all the constants involved short. - baseTmpDir := os.TempDir() - if len(baseTmpDir) >= 104-1-len(socketName)-1-4-len("TestSocketAutoNumbering")-10 { - t.Logf("temp dir name too long: %s", baseTmpDir) - t.Logf("using /tmp instead.") - // Note: /tmp might fail in some systems, that's why we still prefer - // os.TempDir() if available. - baseTmpDir = "/tmp" - } - tempDir, err := os.MkdirTemp(baseTmpDir, "TestSocketAutoNumbering") - require.NoError(t, err) - defer func() { _ = os.RemoveAll(tempDir) }() - - socketFile := filepath.Join(tempDir, socketName) - - ctx := context.Background() - - params := base.TestServerArgs{ - Insecure: true, - SocketFile: socketFile, - } - s := serverutils.StartServerOnly(t, params) - defer s.Stopper().Stop(ctx) - - _, expectedPort, err := addr.SplitHostPort(s.SQLAddr(), "") - require.NoError(t, err) - - if socketPath := s.(*TestServer).Cfg.SocketFile; !strings.HasSuffix(socketPath, "."+expectedPort) { - t.Errorf("expected unix socket ending with port %q, got %q", expectedPort, socketPath) - } -} - -// Test that connections using the internal SQL loopback listener work. -func TestInternalSQL(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - ctx := context.Background() - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) - - conf, err := pgx.ParseConfig("") - require.NoError(t, err) - conf.User = "root" - // Configure pgx to connect on the loopback listener. - conf.DialFunc = func(ctx context.Context, network, addr string) (net.Conn, error) { - return s.(*TestServer).Server.loopbackPgL.Connect(ctx) - } - conn, err := pgx.ConnectConfig(ctx, conf) - require.NoError(t, err) - // Run a random query to check that it all works. 
- r := conn.QueryRow(ctx, "SELECT count(*) FROM system.sqlliveness") - var count int - require.NoError(t, r.Scan(&count)) -} diff --git a/pkg/server/settings_cache_test.go b/pkg/server/settings_cache_test.go index a3160d7165b..d56a7b4370b 100644 --- a/pkg/server/settings_cache_test.go +++ b/pkg/server/settings_cache_test.go @@ -68,6 +68,7 @@ func TestCachedSettingsServerRestart(t *testing.T) { defer stickyEngineRegistry.CloseAllStickyInMemEngines() serverArgs := base.TestServerArgs{ + DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, StoreSpecs: []base.StoreSpec{ {InMemory: true, StickyInMemoryEngineID: "1"}, }, @@ -98,28 +99,26 @@ func TestCachedSettingsServerRestart(t *testing.T) { }) testServer.Stopper().Stop(context.Background()) - ts, err := serverutils.NewServer(serverArgs) + s, err := serverutils.NewServer(serverArgs) if err != nil { t.Fatal(err) } - srv := ts.(*TestServer) - defer srv.Stopper().Stop(context.Background()) + defer s.Stopper().Stop(context.Background()) - s := srv.Server var initServer *initServer { - getDialOpts := s.rpcContext.GRPCDialOptions + getDialOpts := s.RPCContext().GRPCDialOptions - initConfig := newInitServerConfig(ctx, s.cfg, getDialOpts) + initConfig := newInitServerConfig(ctx, s.(*TestServer).Server.cfg, getDialOpts) inspectState, err := inspectEngines( context.Background(), - s.engines, - s.cfg.Settings.Version.BinaryVersion(), - s.cfg.Settings.Version.BinaryMinSupportedVersion(), + s.Engines(), + s.ClusterSettings().Version.BinaryVersion(), + s.ClusterSettings().Version.BinaryMinSupportedVersion(), ) require.NoError(t, err) - initServer = newInitServer(s.cfg.AmbientCtx, inspectState, initConfig) + initServer = newInitServer(s.AmbientCtx(), inspectState, initConfig) } // ServeAndWait should return immediately since the server is already initialized @@ -128,8 +127,8 @@ func TestCachedSettingsServerRestart(t *testing.T) { testutils.SucceedsSoon(t, func() error { state, initialBoot, err := initServer.ServeAndWait( context.Background(), - s.stopper, - &s.cfg.Settings.SV, + s.Stopper(), + &s.ClusterSettings().SV, ) if err != nil { return err diff --git a/pkg/server/span_stats_test.go b/pkg/server/span_stats_test.go index 4721ff64ddc..db561a3e693 100644 --- a/pkg/server/span_stats_test.go +++ b/pkg/server/span_stats_test.go @@ -18,8 +18,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -78,9 +78,9 @@ func TestSpanStatsFanOut(t *testing.T) { tc := testcluster.StartTestCluster(t, numNodes, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - s := tc.Server(0).(*server.TestServer) + s := tc.Server(0) - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) // Create a number of ranges using splits. 
splitKeys := []string{"a", "c", "e", "g", "i"} diff --git a/pkg/server/storage_api/BUILD.bazel b/pkg/server/storage_api/BUILD.bazel index 2733923e221..740fd347f50 100644 --- a/pkg/server/storage_api/BUILD.bazel +++ b/pkg/server/storage_api/BUILD.bazel @@ -44,6 +44,7 @@ go_test( "//pkg/security/username", "//pkg/server", "//pkg/server/apiconstants", + "//pkg/server/decommissioning", "//pkg/server/serverpb", "//pkg/server/srvtestutils", "//pkg/server/status/statuspb", diff --git a/pkg/server/storage_api/decommission_test.go b/pkg/server/storage_api/decommission_test.go index 9cb09ee2a7a..1395a0cdea7 100644 --- a/pkg/server/storage_api/decommission_test.go +++ b/pkg/server/storage_api/decommission_test.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/server/decommissioning" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -57,7 +57,7 @@ func TestDecommissionPreCheckInvalid(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - firstSvr := tc.Server(0).(*server.TestServer) + firstSvr := tc.Server(0) // Create database and tables. ac := firstSvr.AmbientCtx() @@ -73,7 +73,7 @@ func TestDecommissionPreCheckInvalid(t *testing.T) { status, ok := status.FromError(err) require.True(t, ok, "expected grpc status error") require.Equal(t, codes.InvalidArgument, status.Code()) - require.Equal(t, server.DecommissionPreCheckResult{}, result) + require.Equal(t, decommissioning.PreCheckResult{}, result) } // TestDecommissionPreCheckEvaluation tests evaluation of decommission readiness @@ -103,7 +103,7 @@ func TestDecommissionPreCheckEvaluation(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - firstSvr := tc.Server(0).(*server.TestServer) + firstSvr := tc.Server(0) db := tc.ServerConn(0) runQueries := func(queries ...string) { for _, q := range queries { @@ -214,7 +214,7 @@ func TestDecommissionPreCheckOddToEven(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - firstSvr := tc.Server(0).(*server.TestServer) + firstSvr := tc.Server(0) db := tc.ServerConn(0) runQueries := func(queries ...string) { for _, q := range queries { diff --git a/pkg/server/storage_api/files_test.go b/pkg/server/storage_api/files_test.go index 0b1ba76302c..0bed46331de 100644 --- a/pkg/server/storage_api/files_test.go +++ b/pkg/server/storage_api/files_test.go @@ -19,7 +19,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -37,12 +36,11 @@ func TestStatusGetFiles(t *testing.T) { storeSpec := base.StoreSpec{Path: tempDir} - tsI := serverutils.StartServerOnly(t, base.TestServerArgs{ + ts := serverutils.StartServerOnly(t, base.TestServerArgs{ StoreSpecs: []base.StoreSpec{ storeSpec, }, }) - ts := tsI.(*server.TestServer) defer ts.Stopper().Stop(context.Background()) client := ts.GetStatusClient(t) diff --git a/pkg/server/storage_api/health_test.go b/pkg/server/storage_api/health_test.go index a053558bf41..8972b6759d0 100644 --- a/pkg/server/storage_api/health_test.go +++ b/pkg/server/storage_api/health_test.go @@ -18,7 +18,6 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/srvtestutils" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -36,60 +35,72 @@ func TestHealthAPI(t *testing.T) { ctx := context.Background() - s := serverutils.StartServerOnly(t, base.TestServerArgs{ - // Disable the default test tenant for now as this tests fails - // with it enabled. Tracked with #81590. - DefaultTestTenant: base.TODOTestTenantDisabled, - }) - defer s.Stopper().Stop(ctx) - ts := s.(*server.TestServer) + t.Run("sql", func(t *testing.T) { + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + ts := s.ApplicationLayer() - // We need to retry because the node ID isn't set until after - // bootstrapping. - testutils.SucceedsSoon(t, func() error { + // We need to retry because the node ID isn't set until after + // bootstrapping. + testutils.SucceedsSoon(t, func() error { + var resp serverpb.HealthResponse + return srvtestutils.GetAdminJSONProto(ts, "health", &resp) + }) + + // Make the SQL listener appear unavailable. Verify that health fails after that. + ts.SetReady(false) var resp serverpb.HealthResponse - return srvtestutils.GetAdminJSONProto(s, "health", &resp) + err := srvtestutils.GetAdminJSONProto(ts, "health?ready=1", &resp) + if err == nil { + t.Error("server appears ready even though SQL listener is not") + } + ts.SetReady(true) + err = srvtestutils.GetAdminJSONProto(ts, "health?ready=1", &resp) + if err != nil { + t.Errorf("server not ready after SQL listener is ready again: %v", err) + } }) - // Make the SQL listener appear unavailable. Verify that health fails after that. - ts.TestingSetReady(false) - var resp serverpb.HealthResponse - err := srvtestutils.GetAdminJSONProto(s, "health?ready=1", &resp) - if err == nil { - t.Error("server appears ready even though SQL listener is not") - } - ts.TestingSetReady(true) - err = srvtestutils.GetAdminJSONProto(s, "health?ready=1", &resp) - if err != nil { - t.Errorf("server not ready after SQL listener is ready again: %v", err) - } - - // Expire this node's liveness record by pausing heartbeats and advancing the - // server's clock. - nl := ts.NodeLiveness().(*liveness.NodeLiveness) - defer nl.PauseAllHeartbeatsForTest()() - self, ok := nl.Self() - assert.True(t, ok) - s.Clock().Update(self.Expiration.ToTimestamp().Add(1, 0).UnsafeToClockTimestamp()) + t.Run("liveness", func(t *testing.T) { + s := serverutils.StartServerOnly(t, base.TestServerArgs{ + DefaultTestTenant: base.TestIsSpecificToStorageLayerAndNeedsASystemTenant, + }) + defer s.Stopper().Stop(ctx) - testutils.SucceedsSoon(t, func() error { - err := srvtestutils.GetAdminJSONProto(s, "health?ready=1", &resp) - if err == nil { - return errors.New("health OK, still waiting for unhealth") + // Pre-warm the web session cookie for this server before the + // actual test below. + var resp serverpb.HealthResponse + if err := srvtestutils.GetAdminJSONProto(s, "health", &resp); err != nil { + t.Fatal(err) } - t.Logf("observed error: %v", err) - if !testutils.IsError(err, `(?s)503 Service Unavailable.*"error": "node is not healthy"`) { - return err + // Expire this node's liveness record by pausing heartbeats and advancing the + // server's clock. 
+ nl := s.NodeLiveness().(*liveness.NodeLiveness) + defer nl.PauseAllHeartbeatsForTest()() + self, ok := nl.Self() + assert.True(t, ok) + s.Clock().Update(self.Expiration.ToTimestamp().Add(1, 0).UnsafeToClockTimestamp()) + + testutils.SucceedsSoon(t, func() error { + err := srvtestutils.GetAdminJSONProto(s, "health?ready=1", &resp) + if err == nil { + return errors.New("health OK, still waiting for unhealth") + } + + t.Logf("observed error: %v", err) + if !testutils.IsError(err, `(?s)503 Service Unavailable.*"error": "node is not healthy"`) { + return err + } + return nil + }) + + // After the node reports an error with `?ready=1`, the health + // endpoint must still succeed without error when `?ready=1` is not specified. + if err := srvtestutils.GetAdminJSONProto(s, "health", &resp); err != nil { + t.Fatal(err) } - return nil }) - - // After the node reports an error with `?ready=1`, the health - // endpoint must still succeed without error when `?ready=1` is not specified. - if err := srvtestutils.GetAdminJSONProto(s, "health", &resp); err != nil { - t.Fatal(err) - } } func TestLivenessAPI(t *testing.T) { diff --git a/pkg/server/storage_api/logfiles_test.go b/pkg/server/storage_api/logfiles_test.go index 39c49aa00c3..34cf841428e 100644 --- a/pkg/server/storage_api/logfiles_test.go +++ b/pkg/server/storage_api/logfiles_test.go @@ -198,17 +198,15 @@ func TestStatusLocalLogsTenantFilter(t *testing.T) { skip.IgnoreLint(t, "Test only works with low verbosity levels") } - s := log.ScopeWithoutShowLogs(t) - defer s.Close(t) + sc := log.ScopeWithoutShowLogs(t) + defer sc.Close(t) // This test cares about the number of output files. Ensure // there's just one. - defer s.SetupSingleFileLogging()() + defer sc.SetupSingleFileLogging()() - srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer srv.Stopper().Stop(context.Background()) - - ts := srv.(*server.TestServer) + ts := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer ts.Stopper().Stop(context.Background()) appTenantID := roachpb.MustMakeTenantID(uint64(2)) ctxSysTenant, ctxAppTenant := server.TestingMakeLoggingContexts(appTenantID) diff --git a/pkg/server/storage_api/nodes_test.go b/pkg/server/storage_api/nodes_test.go index 547285b3762..6ab3309165f 100644 --- a/pkg/server/storage_api/nodes_test.go +++ b/pkg/server/storage_api/nodes_test.go @@ -86,9 +86,8 @@ func TestStatusJson(t *testing.T) { func TestNodeStatusResponse(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer srv.Stopper().Stop(context.Background()) - s := srv.(*server.TestServer) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(context.Background()) node := s.Node().(*server.Node) wrapper := serverpb.NodesResponse{} diff --git a/pkg/server/storage_api/ranges_test.go b/pkg/server/storage_api/ranges_test.go index e37934df98b..d39751d94b0 100644 --- a/pkg/server/storage_api/ranges_test.go +++ b/pkg/server/storage_api/ranges_test.go @@ -32,10 +32,8 @@ func TestRangesResponse(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) defer kvserver.EnableLeaseHistoryForTesting(100)() - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - - ts := s.(*server.TestServer) + ts := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer ts.Stopper().Stop(context.Background()) t.Run("test ranges response", func(t *testing.T) { // Perform a scan to ensure that all 
the raft groups are initialized. @@ -106,9 +104,8 @@ func TestTenantRangesResponse(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) - ts := s.(*server.TestServer) + ts := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer ts.Stopper().Stop(ctx) t.Run("returns error when TenantID not set in ctx", func(t *testing.T) { rpcStopper := stop.NewStopper() diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index b1b802077b5..c4c1ad4c8f4 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -32,7 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" - "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvprober" @@ -60,7 +59,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" - "github.com/cockroachdb/cockroach/pkg/ts" "github.com/cockroachdb/cockroach/pkg/upgrade/upgradebase" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/admission" @@ -333,15 +331,12 @@ func makeTestConfigFromParams(params base.TestServerArgs) Config { // A TestServer encapsulates an in-memory instantiation of a cockroach node with // a single store. It provides tests with access to Server internals. // Where possible, it should be used through the -// testingshim.TestServerInterface. +// serverutils.TestServerInterface. // // Example usage of a TestServer: // // s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) // defer s.Stopper().Stop() -// // If really needed, in tests that can depend on server, downcast to -// // server.TestServer: -// ts := s.(*server.TestServer) type TestServer struct { Cfg *Config params base.TestServerArgs @@ -473,11 +468,8 @@ func (ts *TestServer) RPCContext() *rpc.Context { } // TsDB returns the ts.DB instance used by the TestServer. -func (ts *TestServer) TsDB() *ts.DB { - if ts != nil { - return ts.tsDB - } - return nil +func (ts *TestServer) TsDB() interface{} { + return ts.tsDB } // SQLConn is part of the serverutils.ApplicationLayerInterface. @@ -536,8 +528,8 @@ func (ts *TestServer) PGPreServer() interface{} { return nil } -// RaftTransport returns the RaftTransport used by the TestServer. -func (ts *TestServer) RaftTransport() *kvserver.RaftTransport { +// RaftTransport is part of the serverutils.StorageLayerInterface. +func (ts *TestServer) RaftTransport() interface{} { if ts != nil { return ts.raftTransport } @@ -559,6 +551,11 @@ func (ts *TestServer) TestingKnobs() *base.TestingKnobs { return nil } +// SQLServerInternal is part of the serverutils.ApplicationLayerInterface. +func (ts *TestServer) SQLServerInternal() interface{} { + return ts.sqlServer +} + // TenantStatusServer returns the TenantStatusServer used by the TestServer. func (ts *TestServer) TenantStatusServer() interface{} { return ts.status @@ -569,6 +566,16 @@ func (ts *TestServer) TestTenants() []serverutils.ApplicationLayerInterface { return ts.testTenants } +// DefaultTestTenantDisabled is part of the serverutils.TenantControlInterface. 
+func (ts *TestServer) DefaultTestTenantDisabled() bool { + return ts.cfg.DisableDefaultTestTenant +} + +// DisableDefaultTestTenant is part of the serverutils.TenantControlInterface. +func (ts *TestServer) DisableDefaultTestTenant() { + ts.cfg.DisableDefaultTestTenant = true +} + // maybeStartDefaultTestTenant might start a test tenant. This can then be used // for multi-tenant testing, where the default SQL connection will be made to // this tenant instead of to the system tenant. Note that we will @@ -695,6 +702,12 @@ func (ts *TestServer) Start(ctx context.Context) error { return nil } +// Stop is part of the serverutils.TestServerInterface. +func (ts *TestServer) Stop(ctx context.Context) { + ctx = ts.Server.AnnotateCtx(ctx) + ts.Server.stopper.Stop(ctx) +} + // TestTenant is an in-memory instantiation of the SQL-only process created for // each active Cockroach tenant. TestTenant provides tests with access to // internal methods and state on SQLServer. It is typically started in tests by @@ -897,6 +910,11 @@ func (t *TestTenant) TestingKnobs() *base.TestingKnobs { return &t.Cfg.TestingKnobs } +// SQLServerInternal is part of the serverutils.ApplicationLayerInterface. +func (t *TestTenant) SQLServerInternal() interface{} { + return t.sql +} + // SpanConfigKVAccessor is part of the serverutils.ApplicationLayerInterface. func (t *TestTenant) SpanConfigKVAccessor() interface{} { return t.sql.tenantConnect @@ -933,6 +951,11 @@ func (t *TestTenant) DrainClients(ctx context.Context) error { return t.drain.drainClients(ctx, nil /* reporter */) } +// Readiness is part of the serverutils.ApplicationLayerInterface. +func (t *TestTenant) Readiness(ctx context.Context) error { + return t.t.admin.checkReadinessForHealthCheck(ctx) +} + // MustGetSQLCounter implements the serverutils.ApplicationLayerInterface. func (t *TestTenant) MustGetSQLCounter(name string) int64 { return mustGetSQLCounterForRegistry(t.sql.metricsRegistry, name) @@ -974,6 +997,11 @@ func (t *TestTenant) ForceTableGC( return internalForceTableGC(ctx, t, database, table, timestamp) } +// DefaultZoneConfig is part of the serverutils.ApplicationLayerInterface. +func (t *TestTenant) DefaultZoneConfig() zonepb.ZoneConfig { + return *t.SystemConfigProvider().GetSystemConfig().DefaultZoneConfig +} + // SettingsWatcher is part of the serverutils.ApplicationLayerInterface. func (t *TestTenant) SettingsWatcher() interface{} { return t.sql.settingsWatcher @@ -1139,6 +1167,21 @@ func (t *TestTenant) StatsForSpan( return t.t.admin.statsForSpan(ctx, span) } +// SetReady is part of the serverutils.ApplicationLayerInterface. +func (t *TestTenant) SetReady(ready bool) { + t.sql.isReady.Set(ready) +} + +// SetAcceptSQLWithoutTLS is part of the serverutils.ApplicationLayerInterface. +func (t *TestTenant) SetAcceptSQLWithoutTLS(accept bool) { + t.Cfg.AcceptSQLWithoutTLS = accept +} + +// PrivilegeChecker is part of the serverutils.ApplicationLayerInterface. +func (t *TestTenant) PrivilegeChecker() interface{} { + return t.t.admin.privilegeChecker +} + // HTTPAuthServer is part of the serverutils.ApplicationLayerInterface. func (t *TestTenant) HTTPAuthServer() interface{} { return t.t.authentication @@ -1506,11 +1549,6 @@ func ExpectedInitialRangeCount( return len(config.StaticSplits()) + len(splits) + 1, nil } -// Stores returns the collection of stores from this TestServer's node. -func (ts *TestServer) Stores() *kvserver.Stores { - return ts.node.stores -} - // GetStores is part of the serverutils.StorageLayerInterface. 
func (ts *TestServer) GetStores() interface{} { return ts.node.stores @@ -1563,12 +1601,16 @@ func (ts *TestServer) DrainClients(ctx context.Context) error { return ts.drain.drainClients(ctx, nil /* reporter */) } -// Readiness returns nil when the server's health probe reports -// readiness, a readiness error otherwise. +// Readiness is part of the serverutils.ApplicationLayerInterface. func (ts *TestServer) Readiness(ctx context.Context) error { return ts.admin.checkReadinessForHealthCheck(ctx) } +// SetReadyFn is part of TestServerInterface. +func (ts *TestServer) SetReadyFn(fn func(bool)) { + ts.Server.cfg.ReadyFn = fn +} + // WriteSummaries implements the serverutils.StorageLayerInterface. func (ts *TestServer) WriteSummaries() error { return ts.node.writeNodeStatus(context.TODO(), time.Hour, false) @@ -1609,7 +1651,7 @@ func (ts *TestServer) MustGetSQLNetworkCounter(name string) int64 { return mustGetSQLCounterForRegistry(reg, name) } -// Locality returns the Locality used by the TestServer. +// Locality is part of the serverutils.StorageLayerInterface. func (ts *TestServer) Locality() *roachpb.Locality { return &ts.cfg.Locality } @@ -1639,12 +1681,6 @@ func (ts *TestServer) DistSenderI() interface{} { return ts.distSender } -// DistSender is like DistSenderI(), but returns the real type instead of -// interface{}. -func (ts *TestServer) DistSender() *kvcoord.DistSender { - return ts.DistSenderI().(*kvcoord.DistSender) -} - // MigrationServer is part of the serverutils.ApplicationLayerInterface. func (ts *TestServer) MigrationServer() interface{} { return ts.Server.migrationServer @@ -1702,7 +1738,7 @@ func (ts *TestServer) SetDistSQLSpanResolver(spanResolver interface{}) { // GetFirstStoreID is part of the serverutils.StorageLayerInterface. func (ts *TestServer) GetFirstStoreID() roachpb.StoreID { firstStoreID := roachpb.StoreID(-1) - err := ts.Stores().VisitStores(func(s *kvserver.Store) error { + err := ts.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { if firstStoreID == -1 { firstStoreID = s.Ident.StoreID } @@ -1743,14 +1779,7 @@ func (ts *TestServer) MergeRanges(leftKey roachpb.Key) (roachpb.RangeDescriptor, return ts.LookupRange(leftKey) } -// SplitRangeWithExpiration splits the range containing splitKey with a sticky -// bit expiring at expirationTime. -// The right range created by the split starts at the split key and extends to the -// original range's end key. -// Returns the new descriptors of the left and right ranges. -// -// splitKey must correspond to a SQL table key (it must end with a family ID / -// col ID). +// SplitRangeWithExpiration is part of the serverutils.StorageLayerInterface. func (ts *TestServer) SplitRangeWithExpiration( splitKey roachpb.Key, expirationTime hlc.Timestamp, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { @@ -1829,46 +1858,10 @@ func (ts *TestServer) SplitRange( return ts.SplitRangeWithExpiration(splitKey, hlc.MaxTimestamp) } -// LeaseInfo describes a range's current and potentially future lease. -type LeaseInfo struct { - cur, next roachpb.Lease -} - -// Current returns the range's current lease. -func (l LeaseInfo) Current() roachpb.Lease { - return l.cur -} - -// CurrentOrProspective returns the range's potential next lease, if a lease -// request is in progress, or the current lease otherwise. -func (l LeaseInfo) CurrentOrProspective() roachpb.Lease { - if !l.next.Empty() { - return l.next - } - return l.cur -} - -// LeaseInfoOpt enumerates options for GetRangeLease. 
-type LeaseInfoOpt int
-
-const (
-	// AllowQueryToBeForwardedToDifferentNode specifies that, if the current node
-	// doesn't have a voter replica, the lease info can come from a different
-	// node.
-	AllowQueryToBeForwardedToDifferentNode LeaseInfoOpt = iota
-	// QueryLocalNodeOnly specifies that an error should be returned if the node
-	// is not able to serve the lease query (because it doesn't have a voting
-	// replica).
-	QueryLocalNodeOnly
-)
-
-// GetRangeLease returns information on the lease for the range containing key, and a
-// timestamp taken from the node. The lease is returned regardless of its status.
-//
-// queryPolicy specifies if its OK to forward the request to a different node.
+// GetRangeLease is part of the serverutils.StorageLayerInterface.
 func (ts *TestServer) GetRangeLease(
-	ctx context.Context, key roachpb.Key, queryPolicy LeaseInfoOpt,
-) (_ LeaseInfo, now hlc.ClockTimestamp, _ error) {
+	ctx context.Context, key roachpb.Key, queryPolicy roachpb.LeaseInfoOpt,
+) (_ roachpb.LeaseInfo, now hlc.ClockTimestamp, _ error) {
 	leaseReq := kvpb.LeaseInfoRequest{
 		RequestHeader: kvpb.RequestHeader{
 			Key: key,
@@ -1888,23 +1881,22 @@ func (ts *TestServer) GetRangeLease(
 		&leaseReq,
 	)
 	if pErr != nil {
-		return LeaseInfo{}, hlc.ClockTimestamp{}, pErr.GoError()
+		return roachpb.LeaseInfo{}, hlc.ClockTimestamp{}, pErr.GoError()
 	}
 	// Adapt the LeaseInfoResponse format to LeaseInfo.
 	resp := leaseResp.(*kvpb.LeaseInfoResponse)
-	if queryPolicy == QueryLocalNodeOnly && resp.EvaluatedBy != ts.GetFirstStoreID() {
+	if queryPolicy == roachpb.QueryLocalNodeOnly && resp.EvaluatedBy != ts.GetFirstStoreID() {
 		// TODO(andrei): Figure out how to deal with nodes with multiple stores.
 		// This API should permit addressing the query to a particular store.
-		return LeaseInfo{}, hlc.ClockTimestamp{}, errors.Errorf(
+		return roachpb.LeaseInfo{}, hlc.ClockTimestamp{}, errors.Errorf(
 			"request not evaluated locally; evaluated by s%d instead of local s%d",
 			resp.EvaluatedBy, ts.GetFirstStoreID())
 	}
-	var l LeaseInfo
+	var l roachpb.LeaseInfo
 	if resp.CurrentLease != nil {
-		l.cur = *resp.CurrentLease
-		l.next = resp.Lease
+		l = roachpb.MakeLeaseInfo(*resp.CurrentLease, resp.Lease)
 	} else {
-		l.cur = resp.Lease
+		l = roachpb.MakeLeaseInfo(resp.Lease, roachpb.Lease{})
 	}
 	return l, ts.Clock().NowAsClockTimestamp(), nil
 }
@@ -1932,6 +1924,11 @@ func (ts *TestServer) StorageLayer() serverutils.StorageLayerInterface {
 	return ts
 }
 
+// TenantController is part of the serverutils.TestServerInterface.
+func (ts *TestServer) TenantController() serverutils.TenantControlInterface {
+	return ts
+}
+
 // SystemLayer is part of the serverutils.TestServerInterface.
 func (ts *TestServer) SystemLayer() serverutils.ApplicationLayerInterface {
 	return ts
@@ -1977,8 +1974,17 @@ func internalForceTableGC(
 	return pErr.GoError()
 }
 
-// ScratchRange is like ScratchRangeEx, but only returns the start key of the
-// new range instead of the range descriptor.
+// DefaultZoneConfig is part of the serverutils.ApplicationLayerInterface.
+func (ts *TestServer) DefaultZoneConfig() zonepb.ZoneConfig {
+	return *ts.SystemConfigProvider().GetSystemConfig().DefaultZoneConfig
+}
+
+// DefaultSystemZoneConfig is part of the serverutils.StorageLayerInterface.
+func (ts *TestServer) DefaultSystemZoneConfig() zonepb.ZoneConfig {
+	return ts.Server.cfg.DefaultSystemZoneConfig
+}
+
+// ScratchRange is part of the serverutils.StorageLayerInterface.
func (ts *TestServer) ScratchRange() (roachpb.Key, error) { _, desc, err := ts.ScratchRangeEx() if err != nil { @@ -1987,15 +1993,13 @@ func (ts *TestServer) ScratchRange() (roachpb.Key, error) { return desc.StartKey.AsRawKey(), nil } -// ScratchRangeEx splits off a range suitable to be used as KV scratch space. -// (it doesn't overlap system spans or SQL tables). +// ScratchRangeEx is part of the serverutils.StorageLayerInterface. func (ts *TestServer) ScratchRangeEx() (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { scratchKey := keys.ScratchRangeMin return ts.SplitRange(scratchKey) } -// ScratchRangeWithExpirationLease is like ScratchRangeWithExpirationLeaseEx but -// returns a key for the RHS ranges, instead of both descriptors from the split. +// ScratchRangeWithExpirationLease is part of the serverutils.StorageLayerInterface. func (ts *TestServer) ScratchRangeWithExpirationLease() (roachpb.Key, error) { _, desc, err := ts.ScratchRangeWithExpirationLeaseEx() if err != nil { @@ -2004,8 +2008,7 @@ func (ts *TestServer) ScratchRangeWithExpirationLease() (roachpb.Key, error) { return desc.StartKey.AsRawKey(), nil } -// ScratchRangeWithExpirationLeaseEx is like ScratchRange but creates a range with -// an expiration based lease. +// ScratchRangeWithExpirationLeaseEx is part of the serverutils.StorageLayerInterface. func (ts *TestServer) ScratchRangeWithExpirationLeaseEx() ( roachpb.RangeDescriptor, roachpb.RangeDescriptor, @@ -2016,6 +2019,11 @@ func (ts *TestServer) ScratchRangeWithExpirationLeaseEx() ( return ts.SplitRange(scratchKey) } +// RaftConfig is part of the serverutils.StorageLayerInterface. +func (ts *TestServer) RaftConfig() base.RaftConfig { + return ts.Cfg.RaftConfig +} + // MetricsRecorder periodically records node-level and store-level metrics. func (ts *TestServer) MetricsRecorder() *status.MetricsRecorder { return ts.node.recorder @@ -2096,11 +2104,21 @@ func (ts *TestServer) StatsForSpan( return ts.admin.statsForSpan(ctx, span) } -// TestingSetReady is exposed for use in health tests. -func (ts *TestServer) TestingSetReady(ready bool) { +// SetReady is part of the serverutils.ApplicationLayerInterface. +func (ts *TestServer) SetReady(ready bool) { ts.sqlServer.isReady.Set(ready) } +// SetAcceptSQLWithoutTLS is part of the serverutils.ApplicationLayerInterface. +func (ts *TestServer) SetAcceptSQLWithoutTLS(accept bool) { + ts.Cfg.AcceptSQLWithoutTLS = accept +} + +// PrivilegeChecker is part of the serverutils.ApplicationLayerInterface. +func (ts *TestServer) PrivilegeChecker() interface{} { + return ts.admin.privilegeChecker +} + // HTTPAuthServer is part of the ApplicationLayerInterface. func (ts *TestServer) HTTPAuthServer() interface{} { return ts.t.authentication @@ -2134,7 +2152,7 @@ func (testServerFactoryImpl) MakeRangeTestServerArgs() base.TestServerArgs { // PrepareRangeTestServer is part of the rangetestutils.TestServerFactory interface. func (testServerFactoryImpl) PrepareRangeTestServer(srv interface{}) error { - ts := srv.(*TestServer) + ts := srv.(serverutils.TestServerInterface) kvDB := ts.ApplicationLayer().DB() // Make sure the range is spun up with an arbitrary read command. We do not @@ -2146,7 +2164,7 @@ func (testServerFactoryImpl) PrepareRangeTestServer(srv interface{}) error { // Make sure the node status is available. This is done by forcing stores to // publish their status, synchronizing to the event feed with a canary // event, and then forcing the server to write summaries immediately. 
- if err := ts.node.computeMetricsPeriodically(context.Background(), map[*kvserver.Store]*storage.MetricsForInterval{}, 0); err != nil { + if err := ts.Node().(*Node).computeMetricsPeriodically(context.Background(), map[*kvserver.Store]*storage.MetricsForInterval{}, 0); err != nil { return errors.Wrap(err, "error publishing store statuses") } diff --git a/pkg/server/user_test.go b/pkg/server/user_test.go index 5676d082fed..f6d46285e47 100644 --- a/pkg/server/user_test.go +++ b/pkg/server/user_test.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/apiconstants" + "github.com/cockroachdb/cockroach/pkg/server/privchecker" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/srvtestutils" "github.com/cockroachdb/cockroach/pkg/sql/roleoption" @@ -33,17 +34,20 @@ import ( func TestValidRoles(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) + srv, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(context.Background()) + + s := srv.ApplicationLayer() ctx := context.Background() fooUser := username.MakeSQLUsernameFromPreNormalizedString("foo") _, err := sqlDB.Exec(fmt.Sprintf("CREATE USER %s", fooUser)) require.NoError(t, err) + privChecker := s.PrivilegeChecker().(privchecker.SQLPrivilegeChecker) for name := range roleoption.ByName { // Test user without the role. - hasRole, err := s.(*TestServer).status.baseStatusServer.privilegeChecker.HasRoleOption(ctx, fooUser, roleoption.ByName[name]) + hasRole, err := privChecker.HasRoleOption(ctx, fooUser, roleoption.ByName[name]) require.NoError(t, err) require.Equal(t, false, hasRole) @@ -62,7 +66,7 @@ func TestValidRoles(t *testing.T) { _, err = sqlDB.Exec(fmt.Sprintf("ALTER USER %s %s%s", fooUser, name, extraInfo)) require.NoError(t, err) - hasRole, err = s.(*TestServer).status.baseStatusServer.privilegeChecker.HasRoleOption(ctx, fooUser, roleoption.ByName[name]) + hasRole, err = privChecker.HasRoleOption(ctx, fooUser, roleoption.ByName[name]) require.NoError(t, err) expectedHasRole := true diff --git a/pkg/sql/authorization_test.go b/pkg/sql/authorization_test.go index 594a3d1c751..71e21651dbd 100644 --- a/pkg/sql/authorization_test.go +++ b/pkg/sql/authorization_test.go @@ -15,7 +15,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -29,11 +28,9 @@ func TestCheckAnyPrivilegeForNodeUser(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + ts := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) - - ts := s.(*server.TestServer) + defer ts.Stopper().Stop(ctx) require.NotNil(t, ts.InternalExecutor()) diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index 1ed6748207c..8e4570cfd7b 100644 --- a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -31,7 +31,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" 
"github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/settingswatcher" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -639,7 +638,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); } func acquire( - ctx context.Context, s *server.TestServer, descID descpb.ID, + ctx context.Context, s serverutils.TestServerInterface, descID descpb.ID, ) (lease.LeasedDescriptor, error) { return s.LeaseManager().(*lease.Manager).Acquire(ctx, s.Clock().Now(), descID) } @@ -710,11 +709,11 @@ CREATE TABLE test.t(a INT PRIMARY KEY); tableDesc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") ctx := context.Background() - lease1, err := acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) + lease1, err := acquire(ctx, s, tableDesc.GetID()) if err != nil { t.Fatal(err) } - lease2, err := acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) + lease2, err := acquire(ctx, s, tableDesc.GetID()) if err != nil { t.Fatal(err) } @@ -737,7 +736,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); <-deleted // We should still be able to acquire, because we have an active lease. - lease3, err := acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) + lease3, err := acquire(ctx, s, tableDesc.GetID()) if err != nil { t.Fatal(err) } @@ -748,7 +747,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); lease3.Release(ctx) // Now we shouldn't be able to acquire any more. - _, err = acquire(ctx, s.(*server.TestServer), tableDesc.GetID()) + _, err = acquire(ctx, s, tableDesc.GetID()) if !testutils.IsError(err, "descriptor is being dropped") { t.Fatalf("got a different error than expected: %v", err) } diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index ecf0e473919..592cc2ddd99 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -29,7 +29,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" @@ -525,8 +524,8 @@ func TestDropIndexWithZoneConfigOSS(t *testing.T) { // required" error. 
zoneConfig := zonepb.ZoneConfig{ Subzones: []zonepb.Subzone{ - {IndexID: uint32(tableDesc.GetPrimaryIndexID()), Config: s.(*server.TestServer).Cfg.DefaultZoneConfig}, - {IndexID: uint32(index.GetID()), Config: s.(*server.TestServer).Cfg.DefaultZoneConfig}, + {IndexID: uint32(tableDesc.GetPrimaryIndexID()), Config: s.DefaultZoneConfig()}, + {IndexID: uint32(index.GetID()), Config: s.DefaultZoneConfig()}, }, } zoneConfigBytes, err := protoutil.Marshal(&zoneConfig) diff --git a/pkg/sql/internal_test.go b/pkg/sql/internal_test.go index 197cea08507..81bf029ee60 100644 --- a/pkg/sql/internal_test.go +++ b/pkg/sql/internal_test.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/isolation" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/isql" @@ -541,9 +540,8 @@ func TestInternalExecutorPushDetectionInTxn(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := context.Background() params, _ := tests.CreateTestServerParams() - si, _, db := serverutils.StartServer(t, params) - defer si.Stopper().Stop(ctx) - s := si.(*server.TestServer) + s, _, db := serverutils.StartServer(t, params) + defer s.Stopper().Stop(ctx) // Setup a txn. txn := db.NewTxn(ctx, "test") diff --git a/pkg/sql/pgwire/auth_test.go b/pkg/sql/pgwire/auth_test.go index 16dbbf0e995..3354ae43310 100644 --- a/pkg/sql/pgwire/auth_test.go +++ b/pkg/sql/pgwire/auth_test.go @@ -30,7 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/pgwire" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/identmap" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -228,11 +227,11 @@ func hbaRunTest(t *testing.T, insecure bool) { // Enable conn/auth logging. // We can't use the cluster settings to do this, because // cluster settings propagate asynchronously. - testServer := s.(*server.TestServer) - pgServer := s.ApplicationLayer().PGServer().(*pgwire.Server) + testServer := s.ApplicationLayer() + pgServer := testServer.PGServer().(*pgwire.Server) pgServer.TestingEnableConnLogging() pgServer.TestingEnableAuthLogging() - s.ApplicationLayer().PGPreServer().(*pgwire.PreServeConnHandler).TestingAcceptSystemIdentityOption(true) + testServer.PGPreServer().(*pgwire.PreServeConnHandler).TestingAcceptSystemIdentityOption(true) httpClient, err := s.GetAdminHTTPClient() if err != nil { @@ -271,7 +270,7 @@ func hbaRunTest(t *testing.T, insecure bool) { } case "accept_sql_without_tls": - testServer.Cfg.AcceptSQLWithoutTLS = true + testServer.SetAcceptSQLWithoutTLS(true) case "set_hba": _, err := conn.ExecContext(context.Background(), @@ -634,9 +633,11 @@ func TestClientAddrOverride(t *testing.T) { defer sc.Close(t) // Start a server. - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) ctx := context.Background() - defer s.Stopper().Stop(ctx) + defer srv.Stopper().Stop(ctx) + + s := srv.ApplicationLayer() pgURL, cleanupFunc := sqlutils.PGUrl( t, s.AdvSQLAddr(), "testClientAddrOverride" /* prefix */, url.User(username.TestUser), @@ -651,9 +652,9 @@ func TestClientAddrOverride(t *testing.T) { // Enable conn/auth logging. 
// We can't use the cluster settings to do this, because // cluster settings for booleans propagate asynchronously. - pgServer := s.ApplicationLayer().PGServer().(*pgwire.Server) + pgServer := s.PGServer().(*pgwire.Server) pgServer.TestingEnableAuthLogging() - pgPreServer := s.ApplicationLayer().PGPreServer().(*pgwire.PreServeConnHandler) + pgPreServer := s.PGPreServer().(*pgwire.PreServeConnHandler) testCases := []struct { specialAddr string @@ -792,19 +793,20 @@ func TestSSLSessionVar(t *testing.T) { sc := log.ScopeWithoutShowLogs(t) defer sc.Close(t) - // Start a server. - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ - DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(107310), - }) - s.(*server.TestServer).Cfg.AcceptSQLWithoutTLS = true ctx := context.Background() - defer s.Stopper().Stop(ctx) + // Start a server. + srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + + s := srv.ApplicationLayer() // Ensure the test user exists. if _, err := db.Exec(fmt.Sprintf(`CREATE USER %s WITH PASSWORD 'abc'`, username.TestUser)); err != nil { t.Fatal(err) } + s.SetAcceptSQLWithoutTLS(true) + pgURLWithCerts, cleanupFuncCerts := sqlutils.PGUrlWithOptionalClientCerts( t, s.AdvSQLAddr(), "TestSSLSessionVarCerts" /* prefix */, url.User(username.TestUser), true, ) diff --git a/pkg/sql/pgwire/conn_test.go b/pkg/sql/pgwire/conn_test.go index b0cfb862d80..9d4f2cdaee7 100644 --- a/pkg/sql/pgwire/conn_test.go +++ b/pkg/sql/pgwire/conn_test.go @@ -194,9 +194,9 @@ func TestConnMessageTooBig(t *testing.T) { ctx := context.Background() params, _ := tests.CreateTestServerParams() - s, mainDB, _ := serverutils.StartServer(t, params) - defer mainDB.Close() - defer s.Stopper().Stop(context.Background()) + srv, mainDB, _ := serverutils.StartServer(t, params) + defer srv.Stopper().Stop(context.Background()) + s := srv.ApplicationLayer() // Form a 1MB string. longStr := "a" @@ -917,9 +917,10 @@ func TestConnCloseReleasesLocks(t *testing.T) { // We're going to test closing the connection in both the Open and Aborted // state. testutils.RunTrueAndFalse(t, "open state", func(t *testing.T, open bool) { - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) ctx := context.Background() - defer s.Stopper().Stop(ctx) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() pgURL, cleanupFunc := sqlutils.PGUrl( t, s.AdvSQLAddr(), "testConnClose" /* prefix */, url.User(username.RootUser), @@ -986,9 +987,11 @@ func TestConnCloseReleasesLocks(t *testing.T) { func TestConnCloseWhileProducingRows(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) ctx := context.Background() - defer s.Stopper().Stop(ctx) + srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + + s := srv.ApplicationLayer() // Disable results buffering. 
if _, err := db.Exec( @@ -1202,8 +1205,9 @@ func TestReadTimeoutConnExits(t *testing.T) { func TestConnResultsBufferSize(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) + srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(context.Background()) + s := srv.ApplicationLayer() // Check that SHOW results_buffer_size correctly exposes the value when it // inherits the default. @@ -1275,7 +1279,7 @@ func TestConnCloseCancelsAuth(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) authBlocked := make(chan struct{}) - s := serverutils.StartServerOnly(t, + srv := serverutils.StartServerOnly(t, base.TestServerArgs{ Insecure: true, Knobs: base.TestingKnobs{ @@ -1293,7 +1297,8 @@ func TestConnCloseCancelsAuth(t *testing.T) { }, }) ctx := context.Background() - defer s.Stopper().Stop(ctx) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() // We're going to open a client connection and do the minimum so that the // server gets to the authentication phase, where it will block. @@ -1325,7 +1330,7 @@ func TestConnServerAbortsOnRepeatedErrors(t *testing.T) { defer log.Scope(t).Close(t) var shouldError uint32 = 0 testingKnobError := fmt.Errorf("a random error") - s, db, _ := serverutils.StartServer(t, + srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{ Insecure: true, Knobs: base.TestingKnobs{ @@ -1340,8 +1345,7 @@ func TestConnServerAbortsOnRepeatedErrors(t *testing.T) { }, }) ctx := context.Background() - defer s.Stopper().Stop(ctx) - defer db.Close() + defer srv.Stopper().Stop(ctx) conn, err := db.Conn(ctx) require.NoError(t, err) @@ -1717,10 +1721,10 @@ func TestParseSearchPathInConnectionString(t *testing.T) { }, } - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) ctx := context.Background() - defer s.Stopper().Stop(ctx) - defer db.Close() + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { @@ -1748,10 +1752,10 @@ func TestParseSearchPathInConnectionString(t *testing.T) { func TestSetSessionArguments(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) ctx := context.Background() - defer s.Stopper().Stop(ctx) - defer db.Close() + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() pgURL, cleanupFunc := sqlutils.PGUrl( t, s.AdvSQLAddr(), "testConnClose" /* prefix */, url.User(username.RootUser), @@ -1828,9 +1832,10 @@ func TestRoleDefaultSettings(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(ctx) - defer db.Close() + srv, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + + s := srv.ApplicationLayer() _, err := db.ExecContext(ctx, "CREATE ROLE testuser WITH LOGIN") require.NoError(t, err) @@ -1989,8 +1994,10 @@ func TestPGWireRejectsNewConnIfTooManyConns(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - testServer := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer testServer.Stopper().Stop(ctx) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer srv.Stopper().Stop(ctx) + + 
testServer := srv.ApplicationLayer() // Users. rootUser := username.RootUser @@ -2037,7 +2044,7 @@ func TestPGWireRejectsNewConnIfTooManyConns(t *testing.T) { } getConnectionCount := func() int { - return int(testServer.ApplicationLayer().SQLServer().(*sql.Server).GetConnectionCount()) + return int(testServer.SQLServer().(*sql.Server).GetConnectionCount()) } requireConnectionCount := func(t *testing.T, expectedCount int) { @@ -2204,9 +2211,11 @@ func TestPGWireRejectsNewConnIfTooManyConns(t *testing.T) { func TestConnCloseReleasesReservedMem(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) ctx := context.Background() - defer s.Stopper().Stop(ctx) + defer srv.Stopper().Stop(ctx) + + s := srv.ApplicationLayer() before := s.PGServer().(*Server).tenantSpecificConnMonitor.AllocBytes() diff --git a/pkg/sql/pgwire/main_test.go b/pkg/sql/pgwire/main_test.go index 6eed0ddc470..f9b6a9948d2 100644 --- a/pkg/sql/pgwire/main_test.go +++ b/pkg/sql/pgwire/main_test.go @@ -14,6 +14,7 @@ import ( "os" "testing" + "github.com/cockroachdb/cockroach/pkg/ccl" "github.com/cockroachdb/cockroach/pkg/security/securityassets" "github.com/cockroachdb/cockroach/pkg/security/securitytest" "github.com/cockroachdb/cockroach/pkg/server" @@ -30,6 +31,7 @@ func TestMain(m *testing.M) { randutil.SeedForTests() serverutils.InitTestServerFactory(server.TestServerFactory) serverutils.InitTestClusterFactory(testcluster.TestClusterFactory) + defer ccl.TestingEnableEnterprise()() os.Exit(m.Run()) } diff --git a/pkg/sql/pgwire/pgtest_test.go b/pkg/sql/pgwire/pgtest_test.go index a2c91e60e42..1c1fcbbecbf 100644 --- a/pkg/sql/pgwire/pgtest_test.go +++ b/pkg/sql/pgwire/pgtest_test.go @@ -43,7 +43,7 @@ func TestPGTest(t *testing.T) { cleanup = func() { s.Stopper().Stop(ctx) } - addr = s.AdvSQLAddr() + addr = s.ApplicationLayer().AdvSQLAddr() user = username.RootUser // None of the tests read that much data, so we hardcode the max message // size to something small. 
This lets us test the handling of large diff --git a/pkg/sql/pgwire/pgwire_test.go b/pkg/sql/pgwire/pgwire_test.go index 13984ced6eb..0d6795ab8eb 100644 --- a/pkg/sql/pgwire/pgwire_test.go +++ b/pkg/sql/pgwire/pgwire_test.go @@ -29,7 +29,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/ccl" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql" @@ -77,7 +76,7 @@ func TestPGWireDrainClient(t *testing.T) { defer srv.Stopper().Stop(ctx) tt := srv.ApplicationLayer() - host, port, err := net.SplitHostPort(srv.AdvSQLAddr()) + host, port, err := net.SplitHostPort(tt.AdvSQLAddr()) if err != nil { t.Fatal(err) } @@ -144,9 +143,10 @@ func TestPGWireDrainOngoingTxns(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s := serverutils.StartServerOnly(t, base.TestServerArgs{Insecure: true}) + srv := serverutils.StartServerOnly(t, base.TestServerArgs{Insecure: true}) ctx := context.Background() - defer s.Stopper().Stop(ctx) + defer srv.Stopper().Stop(ctx) + s := srv.ApplicationLayer() host, port, err := net.SplitHostPort(s.AdvSQLAddr()) if err != nil { @@ -166,7 +166,7 @@ func TestPGWireDrainOngoingTxns(t *testing.T) { } defer db.Close() - pgServer := s.ApplicationLayer().PGServer().(*pgwire.Server) + pgServer := s.PGServer().(*pgwire.Server) // Make sure that the server reports correctly the case in which a // connection did not respond to cancellation in time. @@ -1522,14 +1522,10 @@ func TestPGCommandTags(t *testing.T) { // checkSQLNetworkMetrics returns the server's pgwire bytesIn/bytesOut and an // error if the bytesIn/bytesOut don't satisfy the given minimums and maximums. func checkSQLNetworkMetrics( - srv serverutils.TestServerInterface, minBytesIn, minBytesOut, maxBytesIn, maxBytesOut int64, + srv serverutils.ApplicationLayerInterface, minBytesIn, minBytesOut, maxBytesIn, maxBytesOut int64, ) (int64, int64, error) { - if err := srv.WriteSummaries(); err != nil { - return -1, -1, err - } - - bytesIn := srv.ApplicationLayer().MustGetSQLNetworkCounter(pgwire.MetaBytesIn.Name) - bytesOut := srv.ApplicationLayer().MustGetSQLNetworkCounter(pgwire.MetaBytesOut.Name) + bytesIn := srv.MustGetSQLNetworkCounter(pgwire.MetaBytesIn.Name) + bytesOut := srv.MustGetSQLNetworkCounter(pgwire.MetaBytesOut.Name) if a, min := bytesIn, minBytesIn; a < min { return bytesIn, bytesOut, errors.Errorf("bytesin %d < expected min %d", a, min) } @@ -1548,14 +1544,14 @@ func checkSQLNetworkMetrics( func TestSQLNetworkMetrics(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - defer ccl.TestingEnableEnterprise()() - srv := serverutils.StartServerOnly(t, base.TestServerArgs{}) - defer srv.Stopper().Stop(context.Background()) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(context.Background()) + srv := s.ApplicationLayer() // Setup pgwire client. pgURL, cleanupFn := sqlutils.PGUrl( - t, srv.ApplicationLayer().AdvSQLAddr(), t.Name(), url.User(username.RootUser)) + t, srv.AdvSQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanupFn() const minbytes = 10 @@ -1587,7 +1583,7 @@ func TestSQLNetworkMetrics(t *testing.T) { // Verify connection counter. 
expectConns := func(n int) { testutils.SucceedsSoon(t, func() error { - if conns := srv.ApplicationLayer().MustGetSQLNetworkCounter(pgwire.MetaConns.Name); conns != int64(n) { + if conns := srv.MustGetSQLNetworkCounter(pgwire.MetaConns.Name); conns != int64(n) { return errors.Errorf("connections %d != expected %d", conns, n) } return nil diff --git a/pkg/sql/physicalplan/span_resolver_test.go b/pkg/sql/physicalplan/span_resolver_test.go index c24589dde5d..cb55e4aba1e 100644 --- a/pkg/sql/physicalplan/span_resolver_test.go +++ b/pkg/sql/physicalplan/span_resolver_test.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" @@ -87,11 +86,11 @@ func TestSpanResolverUsesCaches(t *testing.T) { s3 := tc.Servers[3] lr := physicalplan.NewSpanResolver( - s3.Cfg.Settings, + s3.ClusterSettings(), s3.DistSenderI().(*kvcoord.DistSender), s3.GossipI().(*gossip.Gossip), - s3.GetNode().Descriptor.NodeID, - s3.GetNode().Descriptor.Locality, + s3.NodeID(), + *s3.Locality(), s3.Clock(), nil, // rpcCtx replicaoracle.BinPackingChoice) @@ -170,7 +169,7 @@ func populateCache(db *gosql.DB, expectedNumRows int) error { // `CREATE TABLE test (k INT PRIMARY KEY)` at row with value pk (the row will be // the first on the right of the split). func splitRangeAtVal( - ts *server.TestServer, tableDesc catalog.TableDescriptor, pk int, + ts serverutils.TestServerInterface, tableDesc catalog.TableDescriptor, pk int, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { if len(tableDesc.PublicNonPrimaryIndexes()) != 0 { return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, @@ -197,13 +196,13 @@ func TestSpanResolver(t *testing.T) { }) defer s.Stopper().Stop(context.Background()) - rowRanges, tableDesc := setupRanges(db, s.(*server.TestServer), cdb, t) + rowRanges, tableDesc := setupRanges(db, s, cdb, t) lr := physicalplan.NewSpanResolver( - s.(*server.TestServer).Cfg.Settings, + s.ClusterSettings(), s.DistSenderI().(*kvcoord.DistSender), s.GossipI().(*gossip.Gossip), - s.(*server.TestServer).GetNode().Descriptor.NodeID, - s.(*server.TestServer).GetNode().Descriptor.Locality, + s.NodeID(), + *s.Locality(), s.Clock(), nil, // rpcCtx replicaoracle.BinPackingChoice) @@ -296,13 +295,13 @@ func TestMixedDirections(t *testing.T) { }) defer s.Stopper().Stop(context.Background()) - rowRanges, tableDesc := setupRanges(db, s.(*server.TestServer), cdb, t) + rowRanges, tableDesc := setupRanges(db, s, cdb, t) lr := physicalplan.NewSpanResolver( - s.(*server.TestServer).Cfg.Settings, + s.ClusterSettings(), s.DistSenderI().(*kvcoord.DistSender), s.GossipI().(*gossip.Gossip), - s.(*server.TestServer).GetNode().Descriptor.NodeID, - s.(*server.TestServer).GetNode().Descriptor.Locality, + s.NodeID(), + *s.Locality(), s.Clock(), nil, // rpcCtx replicaoracle.BinPackingChoice) @@ -328,7 +327,7 @@ func TestMixedDirections(t *testing.T) { } func setupRanges( - db *gosql.DB, s *server.TestServer, cdb *kv.DB, t *testing.T, + db *gosql.DB, s serverutils.TestServerInterface, cdb *kv.DB, t *testing.T, ) ([]roachpb.RangeDescriptor, catalog.TableDescriptor) { if _, err := db.Exec(`CREATE DATABASE t`); err != nil { t.Fatal(err) diff --git a/pkg/sql/sem/builtins/fingerprint_builtin_test.go 
b/pkg/sql/sem/builtins/fingerprint_builtin_test.go index 0692df6088f..a8c1a026c14 100644 --- a/pkg/sql/sem/builtins/fingerprint_builtin_test.go +++ b/pkg/sql/sem/builtins/fingerprint_builtin_test.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils/fingerprintutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -44,7 +43,7 @@ func TestFingerprint(t *testing.T) { var mu syncutil.Mutex var numExportResponses int var numSSTsInExportResponses int - serv, sqlDB, db := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, db := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ TestingResponseFilter: func(ctx context.Context, ba *kvpb.BatchRequest, br *kvpb.BatchResponse) *kvpb.Error { @@ -62,6 +61,7 @@ func TestFingerprint(t *testing.T) { }, }, }) + defer s.Stopper().Stop(ctx) resetVars := func() { mu.Lock() @@ -109,10 +109,7 @@ func TestFingerprint(t *testing.T) { require.Zero(t, fingerprint) }) - s := serv.(*server.TestServer) - defer s.Stopper().Stop(ctx) - - store, err := s.Stores().GetStore(s.GetFirstStoreID()) + store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) eng := store.TODOEngine() diff --git a/pkg/sql/split_test.go b/pkg/sql/split_test.go index 29a6f631625..80de35e3543 100644 --- a/pkg/sql/split_test.go +++ b/pkg/sql/split_test.go @@ -17,7 +17,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -170,7 +169,7 @@ func TestSplitAt(t *testing.T) { } } else { // Successful split, verify it happened. 
- rng, err := s.(*server.TestServer).LookupRange(key) + rng, err := s.LookupRange(key) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/sqlnoccltest/partition_test.go b/pkg/sql/sqlnoccltest/partition_test.go index ac985d76d4f..8b5861e17e7 100644 --- a/pkg/sql/sqlnoccltest/partition_test.go +++ b/pkg/sql/sqlnoccltest/partition_test.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" @@ -107,12 +106,12 @@ func TestRemovePartitioningOSS(t *testing.T) { { IndexID: uint32(tableDesc.GetPrimaryIndexID()), PartitionName: "p1", - Config: s.(*server.TestServer).Cfg.DefaultZoneConfig, + Config: s.DefaultZoneConfig(), }, { IndexID: uint32(tableDesc.PublicNonPrimaryIndexes()[0].GetID()), PartitionName: "p2", - Config: s.(*server.TestServer).Cfg.DefaultZoneConfig, + Config: s.DefaultZoneConfig(), }, }, } diff --git a/pkg/sql/tests/split_test.go b/pkg/sql/tests/split_test.go index de91ebda2ce..f7e26811135 100644 --- a/pkg/sql/tests/split_test.go +++ b/pkg/sql/tests/split_test.go @@ -79,10 +79,12 @@ func TestSplitOnTableBoundaries(t *testing.T) { }) defer s.Stopper().Stop(context.Background()) + dzcfg := s.DefaultZoneConfig() + dszcfg := s.DefaultSystemZoneConfig() + expectedInitialRanges, err := server.ExpectedInitialRangeCount( keys.SystemSQLCodec, - &s.(*server.TestServer).Cfg.DefaultZoneConfig, - &s.(*server.TestServer).Cfg.DefaultSystemZoneConfig, + &dzcfg, &dszcfg, ) if err != nil { t.Fatal(err) diff --git a/pkg/sql/unsplit_test.go b/pkg/sql/unsplit_test.go index c78207e491a..331ad9846cc 100644 --- a/pkg/sql/unsplit_test.go +++ b/pkg/sql/unsplit_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -203,7 +202,7 @@ func TestUnsplitAt(t *testing.T) { t.Fatalf("%s: unexpected error: %s", tt.unsplitStmt, err) } // Successful unsplit, verify it happened. - rng, err := s.(*server.TestServer).LookupRange(key) + rng, err := s.LookupRange(key) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/zone_config_test.go b/pkg/sql/zone_config_test.go index a303f7f6700..51837a5eb86 100644 --- a/pkg/sql/zone_config_test.go +++ b/pkg/sql/zone_config_test.go @@ -46,7 +46,7 @@ var configDescKey = catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, descpb. // forceNewConfig forces a system config update by writing a bogus descriptor with an // incremented value inside. It then repeatedly fetches the gossip config until the // just-written descriptor is found. 
-func forceNewConfig(t testing.TB, s *server.TestServer) *config.SystemConfig { +func forceNewConfig(t testing.TB, s serverutils.TestServerInterface) *config.SystemConfig { configID++ configDesc := &descpb.Descriptor{ Union: &descpb.Descriptor_Database{ @@ -67,7 +67,7 @@ func forceNewConfig(t testing.TB, s *server.TestServer) *config.SystemConfig { return waitForConfigChange(t, s) } -func waitForConfigChange(t testing.TB, s *server.TestServer) *config.SystemConfig { +func waitForConfigChange(t testing.TB, s serverutils.TestServerInterface) *config.SystemConfig { var foundDesc descpb.Descriptor var cfg *config.SystemConfig testutils.SucceedsSoon(t, func() error { @@ -103,13 +103,12 @@ func TestGetZoneConfig(t *testing.T) { DefaultSystemZoneConfigOverride: &defaultZoneConfig, } - srv, sqlDB, _ := serverutils.StartServer(t, params) - defer srv.Stopper().Stop(context.Background()) + s, sqlDB, _ := serverutils.StartServer(t, params) + defer s.Stopper().Stop(context.Background()) // Set the closed_timestamp interval to be short to shorten the test duration. tdb := sqlutils.MakeSQLRunner(sqlDB) tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '20ms'`) tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '20ms'`) - s := srv.(*server.TestServer) type testCase struct { objectID uint32 @@ -339,13 +338,12 @@ func TestCascadingZoneConfig(t *testing.T) { DefaultSystemZoneConfigOverride: &defaultZoneConfig, } - srv, sqlDB, _ := serverutils.StartServer(t, params) - defer srv.Stopper().Stop(context.Background()) + s, sqlDB, _ := serverutils.StartServer(t, params) + defer s.Stopper().Stop(context.Background()) // Set the closed_timestamp interval to be short to shorten the test duration. tdb := sqlutils.MakeSQLRunner(sqlDB) tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '20ms'`) tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '20ms'`) - s := srv.(*server.TestServer) type testCase struct { objectID uint32 @@ -651,13 +649,12 @@ func BenchmarkGetZoneConfig(b *testing.B) { defer log.Scope(b).Close(b) params, _ := tests.CreateTestServerParams() - srv, sqlDB, _ := serverutils.StartServer(b, params) - defer srv.Stopper().Stop(context.Background()) + s, sqlDB, _ := serverutils.StartServer(b, params) + defer s.Stopper().Stop(context.Background()) // Set the closed_timestamp interval to be short to shorten the test duration. 
tdb := sqlutils.MakeSQLRunner(sqlDB) tdb.Exec(b, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '20ms'`) tdb.Exec(b, `SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '20ms'`) - s := srv.(*server.TestServer) cfg := forceNewConfig(b, s) key := roachpb.RKey(keys.SystemSQLCodec.TablePrefix(bootstrap.TestingUserDescID(0))) diff --git a/pkg/sql/zone_test.go b/pkg/sql/zone_test.go index cec51c67dd8..8805417f388 100644 --- a/pkg/sql/zone_test.go +++ b/pkg/sql/zone_test.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -40,16 +39,16 @@ func TestValidSetShowZones(t *testing.T) { sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE DATABASE d; USE d; CREATE TABLE t ();`) - yamlDefault := fmt.Sprintf("gc: {ttlseconds: %d}", s.(*server.TestServer).Cfg.DefaultZoneConfig.GC.TTLSeconds) + yamlDefault := fmt.Sprintf("gc: {ttlseconds: %d}", s.DefaultZoneConfig().GC.TTLSeconds) yamlOverride := "gc: {ttlseconds: 42}" - zoneOverride := s.(*server.TestServer).Cfg.DefaultZoneConfig + zoneOverride := s.DefaultZoneConfig() zoneOverride.GC = &zonepb.GCPolicy{TTLSeconds: 42} partialZoneOverride := *zonepb.NewZoneConfig() partialZoneOverride.GC = &zonepb.GCPolicy{TTLSeconds: 42} defaultRow := sqlutils.ZoneRow{ ID: keys.RootNamespaceID, - Config: s.(*server.TestServer).Cfg.DefaultZoneConfig, + Config: s.DefaultZoneConfig(), } defaultOverrideRow := sqlutils.ZoneRow{ ID: keys.RootNamespaceID, @@ -246,12 +245,12 @@ func TestZoneInheritField(t *testing.T) { defaultRow := sqlutils.ZoneRow{ ID: keys.RootNamespaceID, - Config: s.(*server.TestServer).Cfg.DefaultZoneConfig, + Config: s.DefaultZoneConfig(), } newReplicationFactor := 10 tableID := sqlutils.QueryTableID(t, db, "d", "public", "t") - newDefCfg := s.(*server.TestServer).Cfg.DefaultZoneConfig + newDefCfg := s.DefaultZoneConfig() newDefCfg.NumReplicas = proto.Int32(int32(newReplicationFactor)) newDefaultRow := sqlutils.ZoneRow{ @@ -261,7 +260,7 @@ func TestZoneInheritField(t *testing.T) { newTableRow := sqlutils.ZoneRow{ ID: tableID, - Config: s.(*server.TestServer).Cfg.DefaultZoneConfig, + Config: s.DefaultZoneConfig(), } // Doesn't have any values of its own. 
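The hunks above all apply the same migration: tests stop casting to the concrete *server.TestServer and instead go through the accessors exposed by serverutils.TestServerInterface and its layer sub-interfaces. A minimal sketch of the resulting test style follows; it is illustrative only and not part of this patch (the test name is hypothetical), but the accessors it uses (DefaultZoneConfig, GetStores, GetFirstStoreID, NodeID) are the ones exercised in the diffs above.

// Illustrative sketch (not part of this patch): a test written against the
// serverutils interfaces rather than the concrete *server.TestServer.
package example_test

import (
	"context"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
)

func TestInterfaceAccessors(t *testing.T) {
	ctx := context.Background()
	s := serverutils.StartServerOnly(t, base.TestServerArgs{})
	defer s.Stopper().Stop(ctx)

	// Previously: s.(*server.TestServer).Cfg.DefaultZoneConfig.
	// Now: the DefaultZoneConfig() accessor on the test server interface.
	zcfg := s.DefaultZoneConfig()
	_ = zcfg.GC

	// Store access goes through GetStores() plus a type assertion instead of
	// the concrete Stores() method on *server.TestServer.
	store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID())
	if err != nil {
		t.Fatal(err)
	}
	_ = store

	// Node identity via NodeID() rather than GetNode().Descriptor.NodeID.
	t.Logf("running as n%d", s.NodeID())
}
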
diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index 674beda324d..c063fc58b1a 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -617,6 +617,41 @@ func TestLint(t *testing.T) { } }) + t.Run("TestServerCast", func(t *testing.T) { + t.Parallel() + cmd, stderr, filter, err := dirCmd( + pkgDir, + "git", + "grep", + "-nE", + `\*(server\.)?TestServer`, + "--", + "*_test.go", + ":!server/server_special_test.go", + ":!server/server_controller_test.go", + ":!server/settings_cache_test.go", + ) + if err != nil { + t.Fatal(err) + } + + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + if err := stream.ForEach(filter, func(s string) { + t.Errorf("\n%s <- forbidden; use Go interfaces instead (see testutils/serverutils/api.go)", s) + }); err != nil { + t.Error(err) + } + + if err := cmd.Wait(); err != nil { + if out := stderr.String(); len(out) > 0 { + t.Fatalf("err=%s, stderr=%s", err, out) + } + } + }) + t.Run("TestSQLTelemetryDirectCount", func(t *testing.T) { t.Parallel() cmd, stderr, filter, err := dirCmd( diff --git a/pkg/testutils/serverutils/BUILD.bazel b/pkg/testutils/serverutils/BUILD.bazel index 5213f3eb30f..808c940452c 100644 --- a/pkg/testutils/serverutils/BUILD.bazel +++ b/pkg/testutils/serverutils/BUILD.bazel @@ -14,6 +14,7 @@ go_library( deps = [ "//pkg/base", "//pkg/config", + "//pkg/config/zonepb", "//pkg/keys", "//pkg/kv", "//pkg/kv/kvprober", @@ -23,6 +24,7 @@ go_library( "//pkg/rpc", "//pkg/security", "//pkg/security/username", + "//pkg/server/decommissioning", "//pkg/server/serverpb", "//pkg/server/status", "//pkg/settings/cluster", diff --git a/pkg/testutils/serverutils/api.go b/pkg/testutils/serverutils/api.go index ff1377695b5..055d96e1ff8 100644 --- a/pkg/testutils/serverutils/api.go +++ b/pkg/testutils/serverutils/api.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" + "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvprober" @@ -28,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/server/decommissioning" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/status" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -48,8 +50,19 @@ type TestServerInterface interface { ApplicationLayerInterface TenantControlInterface + // Start runs the server. This is pre-called by StartServer(). + // It is provided for tests that use the TestServerFactory directly + // (mostly 'cockroach demo'). Start(context.Context) error + // Stop stops the server. This must be called at the end of a test + // to avoid leaking resources. + Stop(context.Context) + + // SetReadyFn can be configured to notify a test when the server is + // ready. This is only effective when called before Start(). + SetReadyFn(fn func(bool)) + // ApplicationLayer returns the interface to the application layer that is // exercised by the test. Depending on how the test server is started // and (optionally) randomization, this can be either the SQL layer @@ -63,9 +76,18 @@ type TestServerInterface interface { // StorageLayer returns the interface to the storage layer. 
StorageLayer() StorageLayerInterface + // TenantController returns the interface to the tenant controller. + TenantController() TenantControlInterface + // BinaryVersionOverride returns the value of an override if set using // TestingKnobs. BinaryVersionOverride() roachpb.Version + + // RunInitialSQL is used by 'cockroach demo' to initialize + // an admin user. + // TODO(knz): Migrate this logic to a demo-specific init task + // or config profile. + RunInitialSQL(ctx context.Context, startSingleNode bool, adminUser, adminPassword string) error } // ApplicationLayerInterface defines accessors to the application @@ -73,6 +95,10 @@ type TestServerInterface interface { // effectively agnostic to whether they use a secondary tenant or not. // This interface is implemented by server.Test{Tenant,Server}. type ApplicationLayerInterface interface { + // Readiness returns true when the server is ready, that is, + // when it is accepting connections and it is not draining. + Readiness(ctx context.Context) error + // SQLInstanceID is the ephemeral ID assigned to a running instance of the // SQLServer. Each tenant can have zero or more running SQLServer instances. SQLInstanceID() base.SQLInstanceID @@ -248,6 +274,10 @@ type ApplicationLayerInterface interface { // TestingKnobs returns the TestingKnobs in use by the test server. TestingKnobs() *base.TestingKnobs + // SQLServerInternal returns the *server.SQLServer as an interface{} + // Note: most tests should use SQLServer() and InternalExecutor() instead. + SQLServerInternal() interface{} + // AmbientCtx retrieves the AmbientContext for this server, // so that a test can instantiate additional one-off components // using the same context details as the server. This should not @@ -347,9 +377,20 @@ type ApplicationLayerInterface interface { ctx context.Context, database, table string, timestamp hlc.Timestamp, ) error - // TODO(irfansharif): We'd benefit from an API to construct a *gosql.DB, or - // better yet, a *sqlutils.SQLRunner. We use it all the time, constructing - // it by hand each time. + // DefaultZoneConfig is a convenience function that accesses + // .SystemConfigProvider().GetSystemConfig().DefaultZoneConfig. + DefaultZoneConfig() zonepb.ZoneConfig + + // SetReady changes the SQL readiness. + SetReady(bool) + + // SetAcceptSQLWithoutTLS changes the corresponding configuration parameter. + SetAcceptSQLWithoutTLS(bool) + + // PrivilegeChecker returns the privilege checker in use by the HTTP + // server. The concrete return value is of type + // privchecker.SQLPrivilegeChecker (interface). + PrivilegeChecker() interface{} } // TenantControlInterface defines the API of a test server that can @@ -397,6 +438,18 @@ type TenantControlInterface interface { // StartedDefaultTestTenant returns true if the server has started // the service for the default test tenant. StartedDefaultTestTenant() bool + + // DefaultTestTenantDisabled returns true if the server has disabled + // the service for the default test tenant. + // TODO(knz): Verify whether this accessor is needed. This should + // be simplified. + DefaultTestTenantDisabled() bool + + // DisableDefaultTestTenant prevents the server from starting the + // service for the default test tenant. + // TODO(knz): Verify whether this accessor is needed. This should + // be simplified. + DisableDefaultTestTenant() } // StorageLayerInterface defines accessors to the storage layer of a @@ -453,10 +506,25 @@ type StorageLayerInterface interface { // SplitRange splits the range containing splitKey. 
SplitRange(splitKey roachpb.Key) (left roachpb.RangeDescriptor, right roachpb.RangeDescriptor, err error) + // SplitRangeWithExpiration splits the range containing splitKey with a sticky + // bit expiring at expirationTime. + // The right range created by the split starts at the split key and extends to the + // original range's end key. + // Returns the new descriptors of the left and right ranges. + // + // splitKey must correspond to a SQL table key (it must end with a family ID / + // col ID). + SplitRangeWithExpiration( + splitKey roachpb.Key, expirationTime hlc.Timestamp, + ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) + // MergeRanges merges the range containing leftKey with the following adjacent // range. MergeRanges(leftKey roachpb.Key) (merged roachpb.RangeDescriptor, err error) + // LookupRange looks up the range descriptor which contains key. + LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) + // ExpectedInitialRangeCount returns the expected number of ranges that should // be on the server after initial (asynchronous) splits have been completed, // assuming no additional information is added outside of the normal bootstrap @@ -481,6 +549,21 @@ type StorageLayerInterface interface { // TestCluster.ScratchRange() which is idempotent). ScratchRange() (roachpb.Key, error) + // ScratchRangeEx splits off a range suitable to be used as KV scratch space. + // (it doesn't overlap system spans or SQL tables). + ScratchRangeEx() (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) + + // ScratchRangeWithExpirationLease is like ScratchRange but the + // range has an expiration lease. + ScratchRangeWithExpirationLease() (roachpb.Key, error) + + // ScratchRangeWithExpirationLeaseEx is like ScratchRangeEx but the + // range has an expiration lease. + ScratchRangeWithExpirationLeaseEx() ( + roachpb.RangeDescriptor, + roachpb.RangeDescriptor, + error) + // Engines returns the TestServer's engines. Engines() []storage.Engine @@ -499,12 +582,55 @@ type StorageLayerInterface interface { KVFlowHandles() interface{} // KvProber returns a *kvprober.Prober, which is useful when asserting the - //correctness of the prober from integration tests. + // correctness of the prober from integration tests. KvProber() *kvprober.Prober + // RaftTransport returns access to the raft transport. + // The return value is of type *kvserver.RaftTransport. + RaftTransport() interface{} + + // GetRangeLease returns information on the lease for the range + // containing key, and a timestamp taken from the node. The lease is + // returned regardless of its status. + // + // queryPolicy specifies if its OK to forward the request to a + // different node. + GetRangeLease( + ctx context.Context, key roachpb.Key, queryPolicy roachpb.LeaseInfoOpt, + ) (_ roachpb.LeaseInfo, now hlc.ClockTimestamp, _ error) + // TenantCapabilitiesReader retrieves a reference to the // capabilities reader. TenantCapabilitiesReader() tenantcapabilities.Reader + + // TsDB returns the ts.DB instance used by the TestServer. + TsDB() interface{} + + // Locality returns a pointer to the locality used by the server. + // + // TODO(test-eng): investigate if this should really be a pointer. + // + // TODO(test-eng): Investigate if this method should be on + // ApplicationLayerInterface instead. + Locality() *roachpb.Locality + + // DefaultSystemZoneConfig returns the internal system zone config + // for the server. + // Note: most tests should instead use the .DefaultZoneConfig() method + // on ApplicationLayerInterface. 
+ DefaultSystemZoneConfig() zonepb.ZoneConfig + + // DecommissionPreCheck is used to evaluate if nodes are ready for decommission. + DecommissionPreCheck( + ctx context.Context, + nodeIDs []roachpb.NodeID, + strictReadiness bool, + collectTraces bool, + maxErrors int, + ) (decommissioning.PreCheckResult, error) + + // RaftConfig retrieves a copy of the raft configuration. + RaftConfig() base.RaftConfig } // TestServerFactory encompasses the actual implementation of the shim diff --git a/pkg/testutils/testcluster/testcluster.go b/pkg/testutils/testcluster/testcluster.go index fbbcc8b967e..28b1f28b090 100644 --- a/pkg/testutils/testcluster/testcluster.go +++ b/pkg/testutils/testcluster/testcluster.go @@ -60,10 +60,10 @@ import ( // analogous to TestServer, but with control over range replication and join // flags. type TestCluster struct { - Servers []*server.TestServer + Servers []serverutils.TestServerInterface Conns []*gosql.DB - // ReusableListeners is populated if (and only if) TestClusterArgs.ReusableListeners is set. - ReusableListeners map[int] /* idx */ *listenerutil.ReusableListener + // reusableListeners is populated if (and only if) TestClusterArgs.reusableListeners is set. + reusableListeners map[int] /* idx */ *listenerutil.ReusableListener stopper *stop.Stopper mu struct { @@ -97,11 +97,6 @@ func (tc *TestCluster) NodeIDs() []roachpb.NodeID { return nodeIds } -// ServerTyped is like Server, but returns the right type. -func (tc *TestCluster) ServerTyped(idx int) *server.TestServer { - return tc.Servers[idx] -} - // ServerConn is part of TestClusterInterface. func (tc *TestCluster) ServerConn(idx int) *gosql.DB { return tc.Conns[idx] @@ -296,10 +291,10 @@ func NewTestCluster( if reg := clusterArgs.ReusableListenerReg; reg != nil && serverArgs.Listener == nil { ln := reg.MustGetOrCreate(t, i) serverArgs.Listener = ln - if tc.ReusableListeners == nil { - tc.ReusableListeners = map[int]*listenerutil.ReusableListener{} + if tc.reusableListeners == nil { + tc.reusableListeners = map[int]*listenerutil.ReusableListener{} } - tc.ReusableListeners[i] = ln + tc.reusableListeners[i] = ln } if len(serverArgs.StoreSpecs) == 0 { @@ -375,7 +370,7 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { // server in the cluster since they should all be set to the same value // (validated below). probabilisticallyStartTestTenant := false - if !tc.Servers[0].Cfg.DisableDefaultTestTenant { + if !tc.Servers[0].DefaultTestTenantDisabled() { probabilisticallyStartTestTenant = serverutils.ShouldStartDefaultTestTenant(t, tc.serverArgs[0]) } @@ -392,9 +387,9 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { // with two separate if checks because the DisableDefaultTestTenant flag // could have been set coming into this function by the caller. if !probabilisticallyStartTestTenant { - tc.Servers[i].Cfg.DisableDefaultTestTenant = true + tc.Servers[i].DisableDefaultTestTenant() } - if tc.Servers[i].Cfg.DisableDefaultTestTenant { + if tc.Servers[i].DefaultTestTenantDisabled() { if startedTestTenant && i > 0 { t.Fatal(errors.Newf("starting only some nodes with a test tenant is not"+ "currently supported - attempted to disable SQL sever on node %d", i)) @@ -560,7 +555,9 @@ func (tc *TestCluster) AddAndStartServerE(serverArgs base.TestServerArgs) error } // AddServer is like AddAndStartServer, except it does not start it. 
-func (tc *TestCluster) AddServer(serverArgs base.TestServerArgs) (*server.TestServer, error) { +func (tc *TestCluster) AddServer( + serverArgs base.TestServerArgs, +) (serverutils.TestServerInterface, error) { serverArgs.PartOfCluster = true if serverArgs.JoinAddr != "" { serverArgs.NoAutoInitializeCluster = true @@ -605,11 +602,10 @@ func (tc *TestCluster) AddServer(serverArgs base.TestServerArgs) (*server.TestSe serverArgs.Addr = serverArgs.Listener.Addr().String() } - srv, err := serverutils.NewServer(serverArgs) + s, err := serverutils.NewServer(serverArgs) if err != nil { return nil, err } - s := srv.(*server.TestServer) // If we only allowed probabilistic starting of the test tenant, we disable // starting additional tenants, even if we didn't start the test tenant. @@ -768,7 +764,7 @@ func (tc *TestCluster) MergeRangesOrFatal( func (tc *TestCluster) Target(serverIdx int) roachpb.ReplicationTarget { s := tc.Servers[serverIdx] return roachpb.ReplicationTarget{ - NodeID: s.GetNode().Descriptor.NodeID, + NodeID: s.NodeID(), StoreID: s.GetFirstStoreID(), } } @@ -1195,7 +1191,7 @@ func (tc *TestCluster) MoveRangeLeaseNonCooperatively( if err != nil { return nil, err } - destStore, err := destServer.Stores().GetStore(dest.StoreID) + destStore, err := destServer.GetStores().(*kvserver.Stores).GetStore(dest.StoreID) if err != nil { return nil, err } @@ -1288,27 +1284,27 @@ func (tc *TestCluster) FindRangeLease( // stale - i.e. there might be a newer lease unbeknownst to the queried node. func (tc *TestCluster) FindRangeLeaseEx( ctx context.Context, rangeDesc roachpb.RangeDescriptor, hint *roachpb.ReplicationTarget, -) (_ server.LeaseInfo, now hlc.ClockTimestamp, _ error) { - var queryPolicy server.LeaseInfoOpt +) (_ roachpb.LeaseInfo, now hlc.ClockTimestamp, _ error) { + var queryPolicy roachpb.LeaseInfoOpt if hint != nil { var ok bool if _, ok = rangeDesc.GetReplicaDescriptor(hint.StoreID); !ok { - return server.LeaseInfo{}, hlc.ClockTimestamp{}, errors.Errorf( + return roachpb.LeaseInfo{}, hlc.ClockTimestamp{}, errors.Errorf( "bad hint: %+v; store doesn't have a replica of the range", hint) } - queryPolicy = server.QueryLocalNodeOnly + queryPolicy = roachpb.QueryLocalNodeOnly } else { hint = &roachpb.ReplicationTarget{ NodeID: rangeDesc.Replicas().Descriptors()[0].NodeID, StoreID: rangeDesc.Replicas().Descriptors()[0].StoreID} - queryPolicy = server.AllowQueryToBeForwardedToDifferentNode + queryPolicy = roachpb.AllowQueryToBeForwardedToDifferentNode } // Find the server indicated by the hint and send a LeaseInfoRequest through // it. hintServer, err := tc.FindMemberServer(hint.StoreID) if err != nil { - return server.LeaseInfo{}, hlc.ClockTimestamp{}, errors.Wrapf(err, "bad hint: %+v; no such node", hint) + return roachpb.LeaseInfo{}, hlc.ClockTimestamp{}, errors.Wrapf(err, "bad hint: %+v; no such node", hint) } return hintServer.GetRangeLease(ctx, rangeDesc.StartKey.AsRawKey(), queryPolicy) @@ -1399,9 +1395,11 @@ func (tc *TestCluster) WaitForSplitAndInitialization(startKey roachpb.Key) error } // FindMemberServer returns the server containing a given store. 
-func (tc *TestCluster) FindMemberServer(storeID roachpb.StoreID) (*server.TestServer, error) { +func (tc *TestCluster) FindMemberServer( + storeID roachpb.StoreID, +) (serverutils.TestServerInterface, error) { for _, server := range tc.Servers { - if server.Stores().HasStore(storeID) { + if server.GetStores().(*kvserver.Stores).HasStore(storeID) { return server, nil } } @@ -1414,7 +1412,7 @@ func (tc *TestCluster) findMemberStore(storeID roachpb.StoreID) (*kvserver.Store if err != nil { return nil, err } - return server.Stores().GetStore(storeID) + return server.GetStores().(*kvserver.Stores).GetStore(storeID) } // WaitForFullReplication waits until all stores in the cluster @@ -1445,7 +1443,7 @@ func (tc *TestCluster) WaitForFullReplication() error { for r := retry.Start(opts); r.Next() && notReplicated; { notReplicated = false for _, s := range tc.Servers { - err := s.Stores().VisitStores(func(s *kvserver.Store) error { + err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { if n := s.ClusterNodeCount(); n != len(tc.Servers) { log.Infof(context.TODO(), "%s only sees %d/%d available nodes", s, n, len(tc.Servers)) notReplicated = true @@ -1557,7 +1555,10 @@ func (tc *TestCluster) WaitForNodeStatuses(t serverutils.TestFataler) { nodeIDs[node.Desc.NodeID] = true } for _, s := range tc.Servers { - if id := s.GetNode().Descriptor.NodeID; !nodeIDs[id] { + // Not using s.NodeID() here, on purpose. s.NodeID() uses the + // in-RAM version in the RPC context, which is set earlier than + // the node descriptor. + if id := s.Node().(*server.Node).Descriptor.NodeID; !nodeIDs[id] { return fmt.Errorf("missing n%d in NodeStatus: %+v", id, response) } } @@ -1596,7 +1597,7 @@ func (tc *TestCluster) ReplicationMode() base.TestClusterReplicationMode { // ToggleReplicateQueues implements TestClusterInterface. func (tc *TestCluster) ToggleReplicateQueues(active bool) { for _, s := range tc.Servers { - _ = s.Stores().VisitStores(func(store *kvserver.Store) error { + _ = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(active) return nil }) @@ -1609,7 +1610,7 @@ func (tc *TestCluster) ToggleReplicateQueues(active bool) { func (tc *TestCluster) ReadIntFromStores(key roachpb.Key) []int64 { results := make([]int64, len(tc.Servers)) for i, server := range tc.Servers { - err := server.Stores().VisitStores(func(s *kvserver.Store) error { + err := server.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { valRes, err := storage.MVCCGet(context.Background(), s.TODOEngine(), key, server.Clock().Now(), storage.MVCCGetOptions{}) if err != nil { @@ -1650,7 +1651,7 @@ func (tc *TestCluster) GetFirstStoreFromServer( t serverutils.TestFataler, server int, ) *kvserver.Store { ts := tc.Servers[server] - store, pErr := ts.Stores().GetStore(ts.GetFirstStoreID()) + store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) } @@ -1679,13 +1680,15 @@ func (tc *TestCluster) RestartServer(idx int) error { // passed in that can observe the server once its been re-created but before it's // been started. This is useful for tests that want to capture that the startup // sequence performs the correct actions i.e. that on startup liveness is gossiped. 
-func (tc *TestCluster) RestartServerWithInspect(idx int, inspect func(s *server.TestServer)) error { +func (tc *TestCluster) RestartServerWithInspect( + idx int, inspect func(s serverutils.TestServerInterface), +) error { if !tc.ServerStopped(idx) { return errors.Errorf("server %d must be stopped before attempting to restart", idx) } serverArgs := tc.serverArgs[idx] - if ln := tc.ReusableListeners[idx]; ln != nil { + if ln := tc.reusableListeners[idx]; ln != nil { serverArgs.Listener = ln } @@ -1725,11 +1728,10 @@ func (tc *TestCluster) RestartServerWithInspect(idx int, inspect func(s *server. return errors.Errorf("failed to restart Server %d, because a restart can only be used on a server with a sticky engine", i) } } - srv, err := serverutils.NewServer(serverArgs) + s, err := serverutils.NewServer(serverArgs) if err != nil { return err } - s := srv.(*server.TestServer) ctx := context.Background() if err := func() error { @@ -1746,11 +1748,11 @@ func (tc *TestCluster) RestartServerWithInspect(idx int, inspect func(s *server. } }() - if err := srv.Start(ctx); err != nil { + if err := s.Start(ctx); err != nil { return err } - dbConn, err := srv.ApplicationLayer().SQLConnE(serverArgs.UseDatabase) + dbConn, err := s.ApplicationLayer().SQLConnE(serverArgs.UseDatabase) if err != nil { return err } @@ -1780,8 +1782,8 @@ func (tc *TestCluster) RestartServerWithInspect(idx int, inspect func(s *server. } for i := 0; i < rpc.NumConnectionClasses; i++ { class := rpc.ConnectionClass(i) - if _, err := s.NodeDialer().(*nodedialer.Dialer).Dial(ctx, srv.NodeID(), class); err != nil { - return errors.Wrapf(err, "connecting n%d->n%d (class %v)", s.NodeID(), srv.NodeID(), class) + if _, err := s.NodeDialer().(*nodedialer.Dialer).Dial(ctx, s.NodeID(), class); err != nil { + return errors.Wrapf(err, "connecting n%d->n%d (class %v)", s.NodeID(), s.NodeID(), class) } } } @@ -1817,7 +1819,7 @@ func (tc *TestCluster) GetRaftLeader( testutils.SucceedsSoon(t, func() error { var latestTerm uint64 for i := range tc.Servers { - err := tc.Servers[i].Stores().VisitStores(func(store *kvserver.Store) error { + err := tc.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { repl := store.LookupReplica(key) if repl == nil { // Replica does not exist on this store or there is no raft diff --git a/pkg/testutils/testcluster/testcluster_test.go b/pkg/testutils/testcluster/testcluster_test.go index 6e9820dbcfb..82ecc584c63 100644 --- a/pkg/testutils/testcluster/testcluster_test.go +++ b/pkg/testutils/testcluster/testcluster_test.go @@ -108,12 +108,8 @@ func TestManualReplication(t *testing.T) { } // Transfer the lease to node 1. - leaseHolder, err := tc.FindRangeLeaseHolder( - tableRangeDesc, - &roachpb.ReplicationTarget{ - NodeID: tc.Servers[0].GetNode().Descriptor.NodeID, - StoreID: tc.Servers[0].GetFirstStoreID(), - }) + target := tc.Target(0) + leaseHolder, err := tc.FindRangeLeaseHolder(tableRangeDesc, &target) if err != nil { t.Fatal(err) } @@ -130,19 +126,15 @@ func TestManualReplication(t *testing.T) { // Check that the lease holder has changed. We'll use the old lease holder as // the hint, since it's guaranteed that the old lease holder has applied the // new lease. 
- leaseHolder, err = tc.FindRangeLeaseHolder( - tableRangeDesc, - &roachpb.ReplicationTarget{ - NodeID: tc.Servers[0].GetNode().Descriptor.NodeID, - StoreID: tc.Servers[0].GetFirstStoreID(), - }) + target = tc.Target(0) + leaseHolder, err = tc.FindRangeLeaseHolder(tableRangeDesc, &target) if err != nil { t.Fatal(err) } if leaseHolder.StoreID != tc.Servers[1].GetFirstStoreID() { t.Fatalf("expected lease on server idx 1 (node: %d store: %d), but is on node: %+v", - tc.Servers[1].GetNode().Descriptor.NodeID, - tc.Servers[1].GetFirstStoreID(), + tc.Server(1).NodeID(), + tc.Server(1).GetFirstStoreID(), leaseHolder) } } diff --git a/pkg/ts/server_test.go b/pkg/ts/server_test.go index 2a18ea93cd5..80858c46ea8 100644 --- a/pkg/ts/server_test.go +++ b/pkg/ts/server_test.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/ts" "github.com/cockroachdb/cockroach/pkg/ts/tspb" @@ -49,10 +48,9 @@ func TestServerQuery(t *testing.T) { }, }) defer s.Stopper().Stop(context.Background()) - tsrv := s.(*server.TestServer) // Populate data directly. - tsdb := tsrv.TsDB() + tsdb := s.TsDB().(*ts.DB) if err := tsdb.StoreData(context.Background(), ts.Resolution10s, []tspb.TimeSeriesData{ { Name: "test.metric", @@ -179,7 +177,7 @@ func TestServerQuery(t *testing.T) { }, } - conn := tsrv.RPCClientConn(t, username.RootUserName()) + conn := s.RPCClientConn(t, username.RootUserName()) client := tspb.NewTimeSeriesClient(conn) response, err := client.Query(context.Background(), &tspb.TimeSeriesQueryRequest{ StartNanos: 500 * 1e9, @@ -266,14 +264,13 @@ func TestServerQueryStarvation(t *testing.T) { TimeSeriesQueryWorkerMax: workerCount, }) defer s.Stopper().Stop(context.Background()) - tsrv := s.(*server.TestServer) seriesCount := workerCount * 2 - if err := populateSeries(seriesCount, 10, 3, tsrv.TsDB()); err != nil { + if err := populateSeries(seriesCount, 10, 3, s.TsDB().(*ts.DB)); err != nil { t.Fatal(err) } - conn := tsrv.RPCClientConn(t, username.RootUserName()) + conn := s.RPCClientConn(t, username.RootUserName()) client := tspb.NewTimeSeriesClient(conn) queries := make([]tspb.Query, 0, seriesCount) @@ -305,11 +302,11 @@ func TestServerQueryTenant(t *testing.T) { }, }) defer s.Stopper().Stop(context.Background()) - tsrv := s.(*server.TestServer) + systemDB := s.SystemLayer().SQLConn(t, "") // Populate data directly. 
- tsdb := tsrv.TsDB() + tsdb := s.TsDB().(*ts.DB) if err := tsdb.StoreData(context.Background(), ts.Resolution10s, []tspb.TimeSeriesData{ { Name: "test.metric", @@ -409,7 +406,7 @@ func TestServerQueryTenant(t *testing.T) { }, } - conn := tsrv.RPCClientConn(t, username.RootUserName()) + conn := s.RPCClientConn(t, username.RootUserName()) client := tspb.NewTimeSeriesClient(conn) systemResponse, err := client.Query(context.Background(), &tspb.TimeSeriesQueryRequest{ StartNanos: 400 * 1e9, @@ -533,13 +530,12 @@ func TestServerQueryMemoryManagement(t *testing.T) { TimeSeriesQueryMemoryBudget: budget, }) defer s.Stopper().Stop(context.Background()) - tsrv := s.(*server.TestServer) - if err := populateSeries(seriesCount, sourceCount, valueCount, tsrv.TsDB()); err != nil { + if err := populateSeries(seriesCount, sourceCount, valueCount, s.TsDB().(*ts.DB)); err != nil { t.Fatal(err) } - conn := tsrv.RPCClientConn(t, username.RootUserName()) + conn := s.RPCClientConn(t, username.RootUserName()) client := tspb.NewTimeSeriesClient(conn) queries := make([]tspb.Query, 0, seriesCount) @@ -603,9 +599,8 @@ func TestServerDump(t *testing.T) { }, }) defer s.Stopper().Stop(ctx) - tsrv := s.(*server.TestServer) - if err := populateSeries(seriesCount, sourceCount, valueCount, tsrv.TsDB()); err != nil { + if err := populateSeries(seriesCount, sourceCount, valueCount, s.TsDB().(*ts.DB)); err != nil { t.Fatal(err) } @@ -614,7 +609,7 @@ func TestServerDump(t *testing.T) { names = append(names, seriesName(series)) } - conn := tsrv.RPCClientConn(t, username.RootUserName()) + conn := s.RPCClientConn(t, username.RootUserName()) client := tspb.NewTimeSeriesClient(conn) dumpClient, err := client.Dump(ctx, &tspb.DumpRequest{ @@ -729,16 +724,15 @@ func BenchmarkServerQuery(b *testing.B) { s := serverutils.StartServerOnly(b, base.TestServerArgs{}) defer s.Stopper().Stop(context.Background()) - tsrv := s.(*server.TestServer) // Populate data for large number of time series. seriesCount := 50 sourceCount := 10 - if err := populateSeries(seriesCount, sourceCount, 3, tsrv.TsDB()); err != nil { + if err := populateSeries(seriesCount, sourceCount, 3, s.TsDB().(*ts.DB)); err != nil { b.Fatal(err) } - conn := tsrv.RPCClientConn(b, username.RootUserName()) + conn := s.RPCClientConn(b, username.RootUserName()) client := tspb.NewTimeSeriesClient(conn) queries := make([]tspb.Query, 0, seriesCount) diff --git a/pkg/upgrade/upgrades/system_job_info_test.go b/pkg/upgrade/upgrades/system_job_info_test.go index 9e4f2e2023c..d4438db7dfc 100644 --- a/pkg/upgrade/upgrades/system_job_info_test.go +++ b/pkg/upgrade/upgrades/system_job_info_test.go @@ -60,7 +60,7 @@ func TestSystemJobInfoMigration(t *testing.T) { // We verify that the jobs table gets its version upgraded through // the upgrade, to ensure the creation of job_info synchronizes with // concurrent accesses to the jobs table. 
- kvDB := tc.Server(0).(*server.TestServer).DB() + kvDB := tc.Server(0).DB() tblBefore := desctestutils.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "system", "public", "jobs") upgrades.Upgrade( From c48783cd5546cccb6843efc673ac0c5e97220047 Mon Sep 17 00:00:00 2001 From: Raphael 'kena' Poss Date: Wed, 2 Aug 2023 03:33:32 +0200 Subject: [PATCH 3/4] server: unexport TestServer Release note: None --- pkg/server/connectivity_test.go | 2 +- pkg/server/server.go | 2 +- .../server_controller_channel_orchestrator.go | 2 +- pkg/server/server_controller_test.go | 2 +- pkg/server/server_special_test.go | 8 +- pkg/server/server_test.go | 2 +- pkg/server/settings_cache_test.go | 12 +- pkg/server/testserver.go | 278 +++++++++--------- pkg/server/testserver_http.go | 4 +- pkg/testutils/lint/lint_test.go | 4 +- 10 files changed, 158 insertions(+), 158 deletions(-) diff --git a/pkg/server/connectivity_test.go b/pkg/server/connectivity_test.go index bd7cfeaa4e4..4bab255cd9c 100644 --- a/pkg/server/connectivity_test.go +++ b/pkg/server/connectivity_test.go @@ -46,7 +46,7 @@ func TestClusterConnectivity(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - // TODO(irfansharif): Teach TestServer to accept a list of join addresses + // TODO(irfansharif): Teach testServer to accept a list of join addresses // instead of just one. var testConfigurations = []struct { diff --git a/pkg/server/server.go b/pkg/server/server.go index 348f9060b48..e2a40962811 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -1522,7 +1522,7 @@ func (s *Server) PreStart(ctx context.Context) error { // Register the Migration service, to power internal crdb upgrades. migrationServer := &migrationServer{server: s} serverpb.RegisterMigrationServer(s.grpc.Server, migrationServer) - s.migrationServer = migrationServer // only for testing via TestServer + s.migrationServer = migrationServer // only for testing via testServer // Register the KeyVisualizer Server keyvispb.RegisterKeyVisualizerServer(s.grpc.Server, s.keyVisualizerServer) diff --git a/pkg/server/server_controller_channel_orchestrator.go b/pkg/server/server_controller_channel_orchestrator.go index a3587b45236..1b8dae509ad 100644 --- a/pkg/server/server_controller_channel_orchestrator.go +++ b/pkg/server/server_controller_channel_orchestrator.go @@ -45,7 +45,7 @@ func newChannelOrchestrator( // serverStateUsingChannels coordinates the lifecycle of a tenant // server. It ensures sane concurrent behavior between: -// - requests to start a server manually, e.g. via TestServer; +// - requests to start a server manually, e.g. via testServer; // - async changes to the tenant service mode; // - quiescence of the outer stopper; // - RPC drain requests on the tenant server; diff --git a/pkg/server/server_controller_test.go b/pkg/server/server_controller_test.go index 00e157bf6d7..3165c929c0e 100644 --- a/pkg/server/server_controller_test.go +++ b/pkg/server/server_controller_test.go @@ -32,7 +32,7 @@ func TestServerController(t *testing.T) { }) defer s.Stopper().Stop(ctx) - ts := s.(*TestServer) + ts := s.(*testServer) d, err := ts.serverController.getServer(ctx, "system") require.NoError(t, err) diff --git a/pkg/server/server_special_test.go b/pkg/server/server_special_test.go index e3a1b484a03..6d3149f74f4 100644 --- a/pkg/server/server_special_test.go +++ b/pkg/server/server_special_test.go @@ -31,7 +31,7 @@ import ( "github.com/stretchr/testify/require" ) -// Tests in this file have a linter exception against casting to *TestServer. 
+// Tests in this file have a linter exception against casting to *testServer. func TestPanicRecovery(t *testing.T) { defer leaktest.AfterTest(t)() @@ -39,7 +39,7 @@ func TestPanicRecovery(t *testing.T) { s := serverutils.StartServerOnly(t, base.TestServerArgs{}) defer s.Stopper().Stop(context.Background()) - ts := s.(*TestServer) + ts := s.(*testServer) // Enable a test-only endpoint that induces a panic. ts.http.mux.Handle("/panic", http.HandlerFunc(func(http.ResponseWriter, *http.Request) { @@ -109,7 +109,7 @@ func TestSocketAutoNumbering(t *testing.T) { _, expectedPort, err := addr.SplitHostPort(s.SQLAddr(), "") require.NoError(t, err) - if socketPath := s.(*TestServer).Cfg.SocketFile; !strings.HasSuffix(socketPath, "."+expectedPort) { + if socketPath := s.(*testServer).Cfg.SocketFile; !strings.HasSuffix(socketPath, "."+expectedPort) { t.Errorf("expected unix socket ending with port %q, got %q", expectedPort, socketPath) } } @@ -127,7 +127,7 @@ func TestInternalSQL(t *testing.T) { conf.User = "root" // Configure pgx to connect on the loopback listener. conf.DialFunc = func(ctx context.Context, network, addr string) (net.Conn, error) { - return s.(*TestServer).Server.loopbackPgL.Connect(ctx) + return s.(*testServer).Server.loopbackPgL.Connect(ctx) } conn, err := pgx.ConnectConfig(ctx, conf) require.NoError(t, err) diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index 9f8aaf3303e..39e8dda9d41 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -204,7 +204,7 @@ func TestPlainHTTPServer(t *testing.T) { }) defer s.Stopper().Stop(context.Background()) - // First, make sure that the TestServer's built-in client interface + // First, make sure that the testServer's built-in client interface // still works in insecure mode. 
var data serverpb.JSONResponse testutils.SucceedsSoon(t, func() error { diff --git a/pkg/server/settings_cache_test.go b/pkg/server/settings_cache_test.go index d56a7b4370b..0325adc5482 100644 --- a/pkg/server/settings_cache_test.go +++ b/pkg/server/settings_cache_test.go @@ -79,11 +79,11 @@ func TestCachedSettingsServerRestart(t *testing.T) { }, } var settingsCache []roachpb.KeyValue - testServer := serverutils.StartServerOnly(t, serverArgs) - closedts.TargetDuration.Override(ctx, &testServer.ClusterSettings().SV, 10*time.Millisecond) - closedts.SideTransportCloseInterval.Override(ctx, &testServer.ClusterSettings().SV, 10*time.Millisecond) + ts := serverutils.StartServerOnly(t, serverArgs) + closedts.TargetDuration.Override(ctx, &ts.ClusterSettings().SV, 10*time.Millisecond) + closedts.SideTransportCloseInterval.Override(ctx, &ts.ClusterSettings().SV, 10*time.Millisecond) testutils.SucceedsSoon(t, func() error { - store, err := testServer.GetStores().(*kvserver.Stores).GetStore(1) + store, err := ts.GetStores().(*kvserver.Stores).GetStore(1) if err != nil { return err } @@ -97,7 +97,7 @@ func TestCachedSettingsServerRestart(t *testing.T) { settingsCache = settings return nil }) - testServer.Stopper().Stop(context.Background()) + ts.Stopper().Stop(context.Background()) s, err := serverutils.NewServer(serverArgs) if err != nil { @@ -109,7 +109,7 @@ func TestCachedSettingsServerRestart(t *testing.T) { { getDialOpts := s.RPCContext().GRPCDialOptions - initConfig := newInitServerConfig(ctx, s.(*TestServer).Server.cfg, getDialOpts) + initConfig := newInitServerConfig(ctx, s.(*testServer).Server.cfg, getDialOpts) inspectState, err := inspectEngines( context.Background(), s.Engines(), diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index c4c1ad4c8f4..228986d5b99 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -109,7 +109,7 @@ func makeTestBaseConfig(st *cluster.Settings, tr *tracing.Tracer) BaseConfig { baseCfg.SSLCertsDir = certnames.EmbeddedCertsDir // Addr defaults to localhost with port set at time of call to // Start() to an available port. May be overridden later (as in - // makeTestConfigFromParams). Call TestServer.AdvRPCAddr() and + // makeTestConfigFromParams). Call testServer.AdvRPCAddr() and // .AdvSQLAddr() for the full address (including bound port). baseCfg.Addr = util.TestAddr.String() baseCfg.AdvertiseAddr = util.TestAddr.String() @@ -283,7 +283,7 @@ func makeTestConfigFromParams(params base.TestServerArgs) Config { // one specific test must have requested it. A failure is returned if // the Path field is empty, which means the test is then forced to pick // the dir (and the test is then responsible for cleaning it up, not - // TestServer). + // testServer). // HeapProfileDirName and GoroutineDumpDirName are normally set by the // cli, once, to the path of the first store. @@ -328,16 +328,16 @@ func makeTestConfigFromParams(params base.TestServerArgs) Config { return cfg } -// A TestServer encapsulates an in-memory instantiation of a cockroach node with +// A testServer encapsulates an in-memory instantiation of a cockroach node with // a single store. It provides tests with access to Server internals. // Where possible, it should be used through the // serverutils.TestServerInterface. 
// -// Example usage of a TestServer: +// Example usage of a testServer: // // s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) // defer s.Stopper().Stop() -type TestServer struct { +type testServer struct { Cfg *Config params base.TestServerArgs // server is the embedded Cockroach server struct. @@ -357,38 +357,38 @@ type TestServer struct { disableStartTenantError error } -var _ serverutils.TestServerInterface = &TestServer{} +var _ serverutils.TestServerInterface = &testServer{} // Node returns the Node as an interface{}. -func (ts *TestServer) Node() interface{} { +func (ts *testServer) Node() interface{} { return ts.node } // NodeID returns the ID of this node within its cluster. -func (ts *TestServer) NodeID() roachpb.NodeID { +func (ts *testServer) NodeID() roachpb.NodeID { return ts.rpcContext.NodeID.Get() } // Stopper returns the embedded server's Stopper. -func (ts *TestServer) Stopper() *stop.Stopper { +func (ts *testServer) Stopper() *stop.Stopper { return ts.stopper } // GossipI is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) GossipI() interface{} { +func (ts *testServer) GossipI() interface{} { return ts.Server.gossip } // RangeFeedFactory is part of serverutils.ApplicationLayerInterface. -func (ts *TestServer) RangeFeedFactory() interface{} { +func (ts *testServer) RangeFeedFactory() interface{} { if ts != nil { return ts.sqlServer.execCfg.RangeFeedFactory } return (*rangefeed.Factory)(nil) } -// Clock returns the clock used by the TestServer. -func (ts *TestServer) Clock() *hlc.Clock { +// Clock returns the clock used by the testServer. +func (ts *testServer) Clock() *hlc.Clock { if ts != nil { return ts.clock } @@ -396,7 +396,7 @@ func (ts *TestServer) Clock() *hlc.Clock { } // SQLLivenessProvider returns the sqlliveness.Provider as an interface{}. -func (ts *TestServer) SQLLivenessProvider() interface{} { +func (ts *testServer) SQLLivenessProvider() interface{} { if ts != nil { return ts.sqlServer.execCfg.SQLLiveness } @@ -404,24 +404,24 @@ func (ts *TestServer) SQLLivenessProvider() interface{} { } // JobRegistry returns the *jobs.Registry as an interface{}. -func (ts *TestServer) JobRegistry() interface{} { +func (ts *testServer) JobRegistry() interface{} { if ts != nil { return ts.sqlServer.jobRegistry } return nil } -// NodeLiveness exposes the NodeLiveness instance used by the TestServer as an +// NodeLiveness exposes the NodeLiveness instance used by the testServer as an // interface{}. -func (ts *TestServer) NodeLiveness() interface{} { +func (ts *testServer) NodeLiveness() interface{} { if ts != nil { return ts.nodeLiveness } return nil } -// NodeDialer returns the NodeDialer used by the TestServer. -func (ts *TestServer) NodeDialer() interface{} { +// NodeDialer returns the NodeDialer used by the testServer. +func (ts *testServer) NodeDialer() interface{} { if ts != nil { return ts.nodeDialer } @@ -429,7 +429,7 @@ func (ts *TestServer) NodeDialer() interface{} { } // HeartbeatNodeLiveness heartbeats the server's NodeLiveness record. -func (ts *TestServer) HeartbeatNodeLiveness() error { +func (ts *testServer) HeartbeatNodeLiveness() error { if ts == nil { return errors.New("no node liveness instance") } @@ -450,35 +450,35 @@ func (ts *TestServer) HeartbeatNodeLiveness() error { } // SQLInstanceID is part of the serverutils.ApplicationLayerInterface. 
-func (ts *TestServer) SQLInstanceID() base.SQLInstanceID { +func (ts *testServer) SQLInstanceID() base.SQLInstanceID { return ts.sqlServer.sqlIDContainer.SQLInstanceID() } // StatusServer is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) StatusServer() interface{} { +func (ts *testServer) StatusServer() interface{} { return ts.status } -// RPCContext returns the rpc context used by the TestServer. -func (ts *TestServer) RPCContext() *rpc.Context { +// RPCContext returns the rpc context used by the testServer. +func (ts *testServer) RPCContext() *rpc.Context { if ts != nil { return ts.rpcContext } return nil } -// TsDB returns the ts.DB instance used by the TestServer. -func (ts *TestServer) TsDB() interface{} { +// TsDB returns the ts.DB instance used by the testServer. +func (ts *testServer) TsDB() interface{} { return ts.tsDB } // SQLConn is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SQLConn(test serverutils.TestFataler, dbName string) *gosql.DB { +func (ts *testServer) SQLConn(test serverutils.TestFataler, dbName string) *gosql.DB { return ts.SQLConnForUser(test, username.RootUser, dbName) } // SQLConnForUser is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SQLConnForUser( +func (ts *testServer) SQLConnForUser( test serverutils.TestFataler, userName, dbName string, ) *gosql.DB { db, err := ts.SQLConnForUserE(userName, dbName) @@ -489,12 +489,12 @@ func (ts *TestServer) SQLConnForUser( } // SQLConnE is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SQLConnE(dbName string) (*gosql.DB, error) { +func (ts *testServer) SQLConnE(dbName string) (*gosql.DB, error) { return ts.SQLConnForUserE(username.RootUser, dbName) } // SQLConnForUserE is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SQLConnForUserE(userName string, dbName string) (*gosql.DB, error) { +func (ts *testServer) SQLConnForUserE(userName string, dbName string) (*gosql.DB, error) { return openTestSQLConn(userName, dbName, ts.Stopper(), ts.Server.loopbackPgL, ts.cfg.SQLAdvertiseAddr, @@ -502,17 +502,17 @@ func (ts *TestServer) SQLConnForUserE(userName string, dbName string) (*gosql.DB ) } -// DB returns the client.DB instance used by the TestServer. -func (ts *TestServer) DB() *kv.DB { +// DB returns the client.DB instance used by the testServer. +func (ts *testServer) DB() *kv.DB { if ts != nil { return ts.db } return nil } -// PGServer exposes the pgwire.Server instance used by the TestServer as an +// PGServer exposes the pgwire.Server instance used by the testServer as an // interface{}. -func (ts *TestServer) PGServer() interface{} { +func (ts *testServer) PGServer() interface{} { if ts != nil { return ts.sqlServer.pgServer } @@ -520,8 +520,8 @@ func (ts *TestServer) PGServer() interface{} { } // PGPreServer exposes the pgwire.PreServeConnHandler instance used by -// the TestServer. -func (ts *TestServer) PGPreServer() interface{} { +// the testServer. +func (ts *testServer) PGPreServer() interface{} { if ts != nil { return ts.pgPreServer } @@ -529,7 +529,7 @@ func (ts *TestServer) PGPreServer() interface{} { } // RaftTransport is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) RaftTransport() interface{} { +func (ts *testServer) RaftTransport() interface{} { if ts != nil { return ts.raftTransport } @@ -539,12 +539,12 @@ func (ts *TestServer) RaftTransport() interface{} { // AmbientCtx implements serverutils.ApplicationLayerInterface. 
This // retrieves the ambient context for this server. This is intended for // exclusive use by test code. -func (ts *TestServer) AmbientCtx() log.AmbientContext { +func (ts *testServer) AmbientCtx() log.AmbientContext { return ts.Cfg.AmbientCtx } -// TestingKnobs returns the TestingKnobs used by the TestServer. -func (ts *TestServer) TestingKnobs() *base.TestingKnobs { +// TestingKnobs returns the TestingKnobs used by the testServer. +func (ts *testServer) TestingKnobs() *base.TestingKnobs { if ts != nil { return &ts.Cfg.TestingKnobs } @@ -552,27 +552,27 @@ func (ts *TestServer) TestingKnobs() *base.TestingKnobs { } // SQLServerInternal is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SQLServerInternal() interface{} { +func (ts *testServer) SQLServerInternal() interface{} { return ts.sqlServer } -// TenantStatusServer returns the TenantStatusServer used by the TestServer. -func (ts *TestServer) TenantStatusServer() interface{} { +// TenantStatusServer returns the TenantStatusServer used by the testServer. +func (ts *testServer) TenantStatusServer() interface{} { return ts.status } // TestTenants provides information to tenant(s) that _may_ have been created -func (ts *TestServer) TestTenants() []serverutils.ApplicationLayerInterface { +func (ts *testServer) TestTenants() []serverutils.ApplicationLayerInterface { return ts.testTenants } // DefaultTestTenantDisabled is part of the serverutils.TenantControlInterface. -func (ts *TestServer) DefaultTestTenantDisabled() bool { +func (ts *testServer) DefaultTestTenantDisabled() bool { return ts.cfg.DisableDefaultTestTenant } // DisableDefaultTestTenant is part of the serverutils.TenantControlInterface. -func (ts *TestServer) DisableDefaultTestTenant() { +func (ts *testServer) DisableDefaultTestTenant() { ts.cfg.DisableDefaultTestTenant = true } @@ -582,7 +582,7 @@ func (ts *TestServer) DisableDefaultTestTenant() { // currently only attempt to start a test tenant if we're running in an // enterprise enabled build. This is due to licensing restrictions on the MT // capabilities. -func (ts *TestServer) maybeStartDefaultTestTenant(ctx context.Context) error { +func (ts *testServer) maybeStartDefaultTestTenant(ctx context.Context) error { // If the flag has been set to disable the default test tenant, don't start // it here. if ts.params.DefaultTestTenant.TestTenantAlwaysDisabled() || ts.cfg.DisableDefaultTestTenant { @@ -659,13 +659,13 @@ func (ts *TestServer) maybeStartDefaultTestTenant(ctx context.Context) error { return nil } -// Start starts the TestServer by bootstrapping an in-memory store +// Start starts the testServer by bootstrapping an in-memory store // (defaults to maximum of 100M). The server is started, launching the // node RPC server and all HTTP endpoints. Use the value of -// TestServer.AdvRPCAddr() after Start() for client connections. -// Use TestServer.Stopper().Stop() to shutdown the server after the test +// testServer.AdvRPCAddr() after Start() for client connections. +// Use testServer.Stopper().Stop() to shutdown the server after the test // completes. -func (ts *TestServer) Start(ctx context.Context) error { +func (ts *testServer) Start(ctx context.Context) error { if err := ts.Server.PreStart(ctx); err != nil { return err } @@ -684,7 +684,7 @@ func (ts *TestServer) Start(ctx context.Context) error { if err := ts.maybeStartDefaultTestTenant(ctx); err != nil { // We're failing the call to this function but we've already started - // the TestServer above. Stop it here to avoid leaking the server. 
+ // the testServer above. Stop it here to avoid leaking the server. ts.Stopper().Stop(context.Background()) return err } @@ -703,7 +703,7 @@ func (ts *TestServer) Start(ctx context.Context) error { } // Stop is part of the serverutils.TestServerInterface. -func (ts *TestServer) Stop(ctx context.Context) { +func (ts *testServer) Stop(ctx context.Context) { ctx = ts.Server.AnnotateCtx(ctx) ts.Server.stopper.Stop(ctx) } @@ -805,7 +805,7 @@ func (t *TestTenant) PGServer() interface{} { } // PGPreServer exposes the pgwire.PreServeConnHandler instance used by -// the TestServer. +// the testServer. func (ts *TestTenant) PGPreServer() interface{} { if ts != nil { return ts.pgPreServer @@ -1008,7 +1008,7 @@ func (t *TestTenant) SettingsWatcher() interface{} { } // StartSharedProcessTenant is part of the serverutils.TenantControlInterface. -func (ts *TestServer) StartSharedProcessTenant( +func (ts *testServer) StartSharedProcessTenant( ctx context.Context, args base.TestSharedProcessTenantArgs, ) (serverutils.ApplicationLayerInterface, *gosql.DB, error) { if err := args.TenantName.IsValid(); err != nil { @@ -1127,7 +1127,7 @@ func (ts *TestServer) StartSharedProcessTenant( } // DisableStartTenant is part of the serverutils.TenantControlInterface. -func (ts *TestServer) DisableStartTenant(reason error) { +func (ts *testServer) DisableStartTenant(reason error) { ts.disableStartTenantError = reason } @@ -1187,7 +1187,7 @@ func (t *TestTenant) HTTPAuthServer() interface{} { return t.t.authentication } -func (ts *TestServer) waitForTenantReadinessImpl( +func (ts *testServer) waitForTenantReadinessImpl( ctx context.Context, tenantID roachpb.TenantID, ) error { _, infoWatcher, err := ts.node.waitForTenantWatcherReadiness(ctx) @@ -1231,7 +1231,7 @@ func (ts *TestServer) waitForTenantReadinessImpl( } // WaitForTenantReadiness is part of serverutils.TenantControlInterface.. -func (ts *TestServer) WaitForTenantReadiness(ctx context.Context, tenantID roachpb.TenantID) error { +func (ts *testServer) WaitForTenantReadiness(ctx context.Context, tenantID roachpb.TenantID) error { // Two minutes should be sufficient for the in-RAM caches to be hydrated with // the tenant record. return timeutil.RunWithTimeout(ctx, "waitForTenantReadiness", 2*time.Minute, func(ctx context.Context) error { @@ -1240,7 +1240,7 @@ func (ts *TestServer) WaitForTenantReadiness(ctx context.Context, tenantID roach } // StartTenant is part of the serverutils.TenantControlInterface. -func (ts *TestServer) StartTenant( +func (ts *testServer) StartTenant( ctx context.Context, params base.TestTenantArgs, ) (serverutils.ApplicationLayerInterface, error) { if ts.disableStartTenantError != nil { @@ -1529,7 +1529,7 @@ func (ts *TestServer) StartTenant( // be on the server after initial (asynchronous) splits have been completed, // assuming no additional information is added outside of the normal bootstrap // process. -func (ts *TestServer) ExpectedInitialRangeCount() (int, error) { +func (ts *testServer) ExpectedInitialRangeCount() (int, error) { return ExpectedInitialRangeCount( ts.sqlServer.execCfg.Codec, &ts.cfg.DefaultZoneConfig, @@ -1550,79 +1550,79 @@ func ExpectedInitialRangeCount( } // GetStores is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) GetStores() interface{} { +func (ts *testServer) GetStores() interface{} { return ts.node.stores } // ClusterSettings returns the ClusterSettings. 
-func (ts *TestServer) ClusterSettings() *cluster.Settings { +func (ts *testServer) ClusterSettings() *cluster.Settings { return ts.Cfg.Settings } // SettingsWatcher is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SettingsWatcher() interface{} { +func (ts *testServer) SettingsWatcher() interface{} { return ts.sqlServer.settingsWatcher } -// Engines returns the TestServer's engines. -func (ts *TestServer) Engines() []storage.Engine { +// Engines returns the testServer's engines. +func (ts *testServer) Engines() []storage.Engine { return ts.engines } // AdvRPCAddr returns the server's RPC address. Should be used by clients. -func (ts *TestServer) AdvRPCAddr() string { +func (ts *testServer) AdvRPCAddr() string { return ts.cfg.AdvertiseAddr } // AdvSQLAddr returns the server's SQL address. Should be used by clients. -func (ts *TestServer) AdvSQLAddr() string { +func (ts *testServer) AdvSQLAddr() string { return ts.cfg.SQLAdvertiseAddr } // HTTPAddr returns the server's HTTP address. Should be used by clients. -func (ts *TestServer) HTTPAddr() string { +func (ts *testServer) HTTPAddr() string { return ts.cfg.HTTPAddr } // RPCAddr returns the server's listening RPC address. // Note: use AdvRPCAddr() instead unless there is a specific reason not to. -func (ts *TestServer) RPCAddr() string { +func (ts *testServer) RPCAddr() string { return ts.cfg.Addr } // SQLAddr returns the server's listening SQL address. // Note: use AdvSQLAddr() instead unless there is a specific reason not to. -func (ts *TestServer) SQLAddr() string { +func (ts *testServer) SQLAddr() string { return ts.cfg.SQLAddr } // DrainClients exports the drainClients() method for use by tests. -func (ts *TestServer) DrainClients(ctx context.Context) error { +func (ts *testServer) DrainClients(ctx context.Context) error { return ts.drain.drainClients(ctx, nil /* reporter */) } // Readiness is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) Readiness(ctx context.Context) error { +func (ts *testServer) Readiness(ctx context.Context) error { return ts.admin.checkReadinessForHealthCheck(ctx) } // SetReadyFn is part of TestServerInterface. -func (ts *TestServer) SetReadyFn(fn func(bool)) { +func (ts *testServer) SetReadyFn(fn func(bool)) { ts.Server.cfg.ReadyFn = fn } // WriteSummaries implements the serverutils.StorageLayerInterface. -func (ts *TestServer) WriteSummaries() error { +func (ts *testServer) WriteSummaries() error { return ts.node.writeNodeStatus(context.TODO(), time.Hour, false) } // UpdateChecker implements the serverutils.StorageLayerInterface. -func (ts *TestServer) UpdateChecker() interface{} { +func (ts *testServer) UpdateChecker() interface{} { return ts.Server.updates } // DiagnosticsReporter implements the serverutils.ApplicationLayerInterface. -func (ts *TestServer) DiagnosticsReporter() interface{} { +func (ts *testServer) DiagnosticsReporter() interface{} { return ts.Server.sqlServer.diagnosticsReporter } @@ -1638,12 +1638,12 @@ func (v *v2AuthDecorator) RoundTrip(r *http.Request) (*http.Response, error) { } // MustGetSQLCounter implements serverutils.ApplicationLayerInterface. -func (ts *TestServer) MustGetSQLCounter(name string) int64 { +func (ts *testServer) MustGetSQLCounter(name string) int64 { return mustGetSQLCounterForRegistry(ts.registry, name) } // MustGetSQLNetworkCounter implements the serverutils.ApplicationLayerInterface. 
-func (ts *TestServer) MustGetSQLNetworkCounter(name string) int64 { +func (ts *testServer) MustGetSQLNetworkCounter(name string) int64 { reg := metric.NewRegistry() for _, m := range ts.sqlServer.pgServer.Metrics() { reg.AddMetricStruct(m) @@ -1652,52 +1652,52 @@ func (ts *TestServer) MustGetSQLNetworkCounter(name string) int64 { } // Locality is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) Locality() *roachpb.Locality { +func (ts *testServer) Locality() *roachpb.Locality { return &ts.cfg.Locality } // LeaseManager is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) LeaseManager() interface{} { +func (ts *testServer) LeaseManager() interface{} { return ts.sqlServer.leaseMgr } // InternalExecutor is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) InternalExecutor() interface{} { +func (ts *testServer) InternalExecutor() interface{} { return ts.sqlServer.internalExecutor } // InternalDB is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) InternalDB() interface{} { +func (ts *testServer) InternalDB() interface{} { return ts.sqlServer.internalDB } // GetNode exposes the Server's Node. -func (ts *TestServer) GetNode() *Node { +func (ts *testServer) GetNode() *Node { return ts.node } // DistSenderI is part of DistSenderInterface. -func (ts *TestServer) DistSenderI() interface{} { +func (ts *testServer) DistSenderI() interface{} { return ts.distSender } // MigrationServer is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) MigrationServer() interface{} { +func (ts *testServer) MigrationServer() interface{} { return ts.Server.migrationServer } // SpanConfigKVAccessor is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) SpanConfigKVAccessor() interface{} { +func (ts *testServer) SpanConfigKVAccessor() interface{} { return ts.Server.node.spanConfigAccessor } // SpanConfigReporter is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) SpanConfigReporter() interface{} { +func (ts *testServer) SpanConfigReporter() interface{} { return ts.Server.node.spanConfigReporter } // SpanConfigReconciler is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SpanConfigReconciler() interface{} { +func (ts *testServer) SpanConfigReconciler() interface{} { if ts.sqlServer.spanconfigMgr == nil { panic("uninitialized; see EnableSpanConfigs testing knob to use span configs") } @@ -1705,7 +1705,7 @@ func (ts *TestServer) SpanConfigReconciler() interface{} { } // SpanConfigSQLTranslatorFactory is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SpanConfigSQLTranslatorFactory() interface{} { +func (ts *testServer) SpanConfigSQLTranslatorFactory() interface{} { if ts.sqlServer.spanconfigSQLTranslatorFactory == nil { panic("uninitialized; see EnableSpanConfigs testing knob to use span configs") } @@ -1713,7 +1713,7 @@ func (ts *TestServer) SpanConfigSQLTranslatorFactory() interface{} { } // SpanConfigSQLWatcher is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SpanConfigSQLWatcher() interface{} { +func (ts *testServer) SpanConfigSQLWatcher() interface{} { if ts.sqlServer.spanconfigSQLWatcher == nil { panic("uninitialized; see EnableSpanConfigs testing knob to use span configs") } @@ -1721,22 +1721,22 @@ func (ts *TestServer) SpanConfigSQLWatcher() interface{} { } // SQLServer is part of the serverutils.ApplicationLayerInterface. 
-func (ts *TestServer) SQLServer() interface{} { +func (ts *testServer) SQLServer() interface{} { return ts.sqlServer.pgServer.SQLServer } // DistSQLServer is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) DistSQLServer() interface{} { +func (ts *testServer) DistSQLServer() interface{} { return ts.sqlServer.distSQLServer } // SetDistSQLSpanResolver is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SetDistSQLSpanResolver(spanResolver interface{}) { +func (ts *testServer) SetDistSQLSpanResolver(spanResolver interface{}) { ts.sqlServer.execCfg.DistSQLPlanner.SetSpanResolver(spanResolver.(physicalplan.SpanResolver)) } // GetFirstStoreID is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) GetFirstStoreID() roachpb.StoreID { +func (ts *testServer) GetFirstStoreID() roachpb.StoreID { firstStoreID := roachpb.StoreID(-1) err := ts.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { if firstStoreID == -1 { @@ -1751,7 +1751,7 @@ func (ts *TestServer) GetFirstStoreID() roachpb.StoreID { } // LookupRange returns the descriptor of the range containing key. -func (ts *TestServer) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) { +func (ts *testServer) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) { rs, _, err := kv.RangeLookup(context.Background(), ts.DB().NonTransactionalSender(), key, kvpb.CONSISTENT, 0 /* prefetchNum */, false /* reverse */) if err != nil { @@ -1762,7 +1762,7 @@ func (ts *TestServer) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, err } // MergeRanges merges the range containing leftKey with the range to its right. -func (ts *TestServer) MergeRanges(leftKey roachpb.Key) (roachpb.RangeDescriptor, error) { +func (ts *testServer) MergeRanges(leftKey roachpb.Key) (roachpb.RangeDescriptor, error) { ctx := context.Background() mergeReq := kvpb.AdminMergeRequest{ @@ -1780,7 +1780,7 @@ func (ts *TestServer) MergeRanges(leftKey roachpb.Key) (roachpb.RangeDescriptor, } // SplitRangeWithExpiration is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) SplitRangeWithExpiration( +func (ts *testServer) SplitRangeWithExpiration( splitKey roachpb.Key, expirationTime hlc.Timestamp, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { ctx := context.Background() @@ -1852,14 +1852,14 @@ func (ts *TestServer) SplitRangeWithExpiration( // SplitRange is exactly like SplitRangeWithExpiration, except that it creates a // split with a sticky bit that never expires. -func (ts *TestServer) SplitRange( +func (ts *testServer) SplitRange( splitKey roachpb.Key, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { return ts.SplitRangeWithExpiration(splitKey, hlc.MaxTimestamp) } // GetRangeLease is part of severutils.StorageLayerInterface. -func (ts *TestServer) GetRangeLease( +func (ts *testServer) GetRangeLease( ctx context.Context, key roachpb.Key, queryPolicy roachpb.LeaseInfoOpt, ) (_ roachpb.LeaseInfo, now hlc.ClockTimestamp, _ error) { leaseReq := kvpb.LeaseInfoRequest{ @@ -1902,17 +1902,17 @@ func (ts *TestServer) GetRangeLease( } // ExecutorConfig is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) ExecutorConfig() interface{} { +func (ts *testServer) ExecutorConfig() interface{} { return *ts.sqlServer.execCfg } // StartedDefaultTestTenant is part of the serverutils.TenantControlInterface. 
-func (ts *TestServer) StartedDefaultTestTenant() bool { +func (ts *testServer) StartedDefaultTestTenant() bool { return len(ts.testTenants) > 0 } // ApplicationLayer is part of the serverutils.TestServerInterface. -func (ts *TestServer) ApplicationLayer() serverutils.ApplicationLayerInterface { +func (ts *testServer) ApplicationLayer() serverutils.ApplicationLayerInterface { if ts.StartedDefaultTestTenant() { return ts.testTenants[0] } @@ -1920,32 +1920,32 @@ func (ts *TestServer) ApplicationLayer() serverutils.ApplicationLayerInterface { } // StorageLayer is part of the serverutils.TestServerInterface. -func (ts *TestServer) StorageLayer() serverutils.StorageLayerInterface { +func (ts *testServer) StorageLayer() serverutils.StorageLayerInterface { return ts } // TenantController is part of the serverutils.TestServerInterface. -func (ts *TestServer) TenantController() serverutils.TenantControlInterface { +func (ts *testServer) TenantController() serverutils.TenantControlInterface { return ts } // SystemLayer is part of the serverutils.TestServerInterface. -func (ts *TestServer) SystemLayer() serverutils.ApplicationLayerInterface { +func (ts *testServer) SystemLayer() serverutils.ApplicationLayerInterface { return ts } // TracerI is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) TracerI() interface{} { +func (ts *testServer) TracerI() interface{} { return ts.Tracer() } // Tracer is like TracerI(), but returns the actual type. -func (ts *TestServer) Tracer() *tracing.Tracer { +func (ts *testServer) Tracer() *tracing.Tracer { return ts.node.storeCfg.AmbientCtx.Tracer } // ForceTableGC is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) ForceTableGC( +func (ts *testServer) ForceTableGC( ctx context.Context, database, table string, timestamp hlc.Timestamp, ) error { return internalForceTableGC(ctx, ts.SystemLayer(), database, table, timestamp) @@ -1975,17 +1975,17 @@ func internalForceTableGC( } // DefaultZoneConfig is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) DefaultZoneConfig() zonepb.ZoneConfig { +func (ts *testServer) DefaultZoneConfig() zonepb.ZoneConfig { return *ts.SystemConfigProvider().GetSystemConfig().DefaultZoneConfig } // DefaultSystemZoneConfig is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) DefaultSystemZoneConfig() zonepb.ZoneConfig { +func (ts *testServer) DefaultSystemZoneConfig() zonepb.ZoneConfig { return ts.Server.cfg.DefaultSystemZoneConfig } // ScratchRange is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) ScratchRange() (roachpb.Key, error) { +func (ts *testServer) ScratchRange() (roachpb.Key, error) { _, desc, err := ts.ScratchRangeEx() if err != nil { return nil, err @@ -1994,13 +1994,13 @@ func (ts *TestServer) ScratchRange() (roachpb.Key, error) { } // ScratchRangeEx is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) ScratchRangeEx() (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { +func (ts *testServer) ScratchRangeEx() (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { scratchKey := keys.ScratchRangeMin return ts.SplitRange(scratchKey) } // ScratchRangeWithExpirationLease is part of the serverutils.StorageLayerInterface. 
-func (ts *TestServer) ScratchRangeWithExpirationLease() (roachpb.Key, error) { +func (ts *testServer) ScratchRangeWithExpirationLease() (roachpb.Key, error) { _, desc, err := ts.ScratchRangeWithExpirationLeaseEx() if err != nil { return nil, err @@ -2009,7 +2009,7 @@ func (ts *TestServer) ScratchRangeWithExpirationLease() (roachpb.Key, error) { } // ScratchRangeWithExpirationLeaseEx is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) ScratchRangeWithExpirationLeaseEx() ( +func (ts *testServer) ScratchRangeWithExpirationLeaseEx() ( roachpb.RangeDescriptor, roachpb.RangeDescriptor, error, @@ -2020,57 +2020,57 @@ func (ts *TestServer) ScratchRangeWithExpirationLeaseEx() ( } // RaftConfig is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) RaftConfig() base.RaftConfig { +func (ts *testServer) RaftConfig() base.RaftConfig { return ts.Cfg.RaftConfig } // MetricsRecorder periodically records node-level and store-level metrics. -func (ts *TestServer) MetricsRecorder() *status.MetricsRecorder { +func (ts *testServer) MetricsRecorder() *status.MetricsRecorder { return ts.node.recorder } // CollectionFactory is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) CollectionFactory() interface{} { +func (ts *testServer) CollectionFactory() interface{} { return ts.sqlServer.execCfg.CollectionFactory } // SystemTableIDResolver is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SystemTableIDResolver() interface{} { +func (ts *testServer) SystemTableIDResolver() interface{} { return ts.sqlServer.execCfg.SystemTableIDResolver } // SpanConfigKVSubscriber is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) SpanConfigKVSubscriber() interface{} { +func (ts *testServer) SpanConfigKVSubscriber() interface{} { return ts.node.storeCfg.SpanConfigSubscriber } // SystemConfigProvider is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SystemConfigProvider() config.SystemConfigProvider { +func (ts *testServer) SystemConfigProvider() config.SystemConfigProvider { return ts.node.storeCfg.SystemConfigProvider } // KVFlowController is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) KVFlowController() interface{} { +func (ts *testServer) KVFlowController() interface{} { return ts.node.storeCfg.KVFlowController } // KVFlowHandles is part of the serverutils.StorageLayerInterface. -func (ts *TestServer) KVFlowHandles() interface{} { +func (ts *testServer) KVFlowHandles() interface{} { return ts.node.storeCfg.KVFlowHandles } // Codec is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) Codec() keys.SQLCodec { +func (ts *testServer) Codec() keys.SQLCodec { return ts.ExecutorConfig().(sql.ExecutorConfig).Codec } // RangeDescIteratorFactory is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) RangeDescIteratorFactory() interface{} { +func (ts *testServer) RangeDescIteratorFactory() interface{} { return ts.sqlServer.execCfg.RangeDescIteratorFactory } // BinaryVersionOverride is part of the serverutils.TestServerInterface. -func (ts *TestServer) BinaryVersionOverride() roachpb.Version { +func (ts *testServer) BinaryVersionOverride() roachpb.Version { knobs := ts.TestingKnobs().Server if knobs == nil { return roachpb.Version{} @@ -2079,48 +2079,48 @@ func (ts *TestServer) BinaryVersionOverride() roachpb.Version { } // KvProber is part of the serverutils.StorageLayerInterface. 
-func (ts *TestServer) KvProber() *kvprober.Prober { +func (ts *testServer) KvProber() *kvprober.Prober { return ts.Server.kvProber } // QueryDatabaseID is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) QueryDatabaseID( +func (ts *testServer) QueryDatabaseID( ctx context.Context, userName username.SQLUsername, dbName string, ) (descpb.ID, error) { return ts.admin.queryDatabaseID(ctx, userName, dbName) } // QueryTableID is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) QueryTableID( +func (ts *testServer) QueryTableID( ctx context.Context, userName username.SQLUsername, dbName, tbName string, ) (descpb.ID, error) { return ts.admin.queryTableID(ctx, userName, dbName, tbName) } // StatsForSpans is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) StatsForSpan( +func (ts *testServer) StatsForSpan( ctx context.Context, span roachpb.Span, ) (*serverpb.TableStatsResponse, error) { return ts.admin.statsForSpan(ctx, span) } // SetReady is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SetReady(ready bool) { +func (ts *testServer) SetReady(ready bool) { ts.sqlServer.isReady.Set(ready) } // SetAcceptSQLWithoutTLS is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) SetAcceptSQLWithoutTLS(accept bool) { +func (ts *testServer) SetAcceptSQLWithoutTLS(accept bool) { ts.Cfg.AcceptSQLWithoutTLS = accept } // PrivilegeChecker is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) PrivilegeChecker() interface{} { +func (ts *testServer) PrivilegeChecker() interface{} { return ts.admin.privilegeChecker } // HTTPAuthServer is part of the ApplicationLayerInterface. -func (ts *TestServer) HTTPAuthServer() interface{} { +func (ts *testServer) HTTPAuthServer() interface{} { return ts.t.authentication } @@ -2186,7 +2186,7 @@ func (testServerFactoryImpl) New(params base.TestServerArgs) (interface{}, error } cfg := makeTestConfigFromParams(params) - ts := &TestServer{Cfg: &cfg, params: params} + ts := &testServer{Cfg: &cfg, params: params} if params.Stopper == nil { params.Stopper = stop.NewStopper() @@ -2285,7 +2285,7 @@ func TestingMakeLoggingContexts( } // NewClientRPCContext is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) NewClientRPCContext( +func (ts *testServer) NewClientRPCContext( ctx context.Context, user username.SQLUsername, ) *rpc.Context { return newClientRPCContext(ctx, user, @@ -2296,7 +2296,7 @@ func (ts *TestServer) NewClientRPCContext( } // RPCClientConn is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) RPCClientConn( +func (ts *testServer) RPCClientConn( test serverutils.TestFataler, user username.SQLUsername, ) *grpc.ClientConn { conn, err := ts.RPCClientConnE(user) @@ -2307,20 +2307,20 @@ func (ts *TestServer) RPCClientConn( } // RPCClientConnE is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) RPCClientConnE(user username.SQLUsername) (*grpc.ClientConn, error) { +func (ts *testServer) RPCClientConnE(user username.SQLUsername) (*grpc.ClientConn, error) { ctx := context.Background() rpcCtx := ts.NewClientRPCContext(ctx, user) return rpcCtx.GRPCDialNode(ts.AdvRPCAddr(), ts.NodeID(), rpc.DefaultClass).Connect(ctx) } // GetAdminClient is part of the serverutils.ApplicationLayerInterface. 
-func (ts *TestServer) GetAdminClient(test serverutils.TestFataler) serverpb.AdminClient { +func (ts *testServer) GetAdminClient(test serverutils.TestFataler) serverpb.AdminClient { conn := ts.RPCClientConn(test, username.RootUserName()) return serverpb.NewAdminClient(conn) } // GetStatusClient is part of the serverutils.ApplicationLayerInterface. -func (ts *TestServer) GetStatusClient(test serverutils.TestFataler) serverpb.StatusClient { +func (ts *testServer) GetStatusClient(test serverutils.TestFataler) serverpb.StatusClient { conn := ts.RPCClientConn(test, username.RootUserName()) return serverpb.NewStatusClient(conn) } diff --git a/pkg/server/testserver_http.go b/pkg/server/testserver_http.go index 597ae58a820..b465ecee75b 100644 --- a/pkg/server/testserver_http.go +++ b/pkg/server/testserver_http.go @@ -29,12 +29,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/protoutil" ) -// httpTestServer is embedded in TestServer / TenantServer to +// httpTestServer is embedded in testServer / TenantServer to // provide the HTTP API subset of ApplicationLayerInterface. type httpTestServer struct { t struct { // We need a sub-struct to avoid ambiguous overlap with the fields - // of *Server, which are also embedded in TestServer. + // of *Server, which are also embedded in testServer. authentication authserver.Server sqlServer *SQLServer tenantName roachpb.TenantName diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index c063fc58b1a..9b5b28daba5 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -624,9 +624,9 @@ func TestLint(t *testing.T) { "git", "grep", "-nE", - `\*(server\.)?TestServer`, + `\*testServer`, "--", - "*_test.go", + "server/*_test.go", ":!server/server_special_test.go", ":!server/server_controller_test.go", ":!server/settings_cache_test.go", From eaf0f229c44b00b723ebb2b911d1cf91a4283738 Mon Sep 17 00:00:00 2001 From: Raphael 'kena' Poss Date: Wed, 2 Aug 2023 03:49:31 +0200 Subject: [PATCH 4/4] server: unexport TestTenant Release note: None --- pkg/ccl/changefeedccl/helpers_test.go | 2 +- pkg/ccl/serverccl/server_controller_test.go | 27 ---- pkg/ccl/sqlproxyccl/proxy_handler_test.go | 7 +- pkg/server/server_controller_test.go | 27 ++++ pkg/server/tenant.go | 2 +- pkg/server/testserver.go | 146 ++++++++++---------- pkg/testutils/lint/lint_test.go | 2 +- 7 files changed, 106 insertions(+), 107 deletions(-) diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index ee9a735c4e1..7afe35cd164 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -822,7 +822,7 @@ func makeTenantServerWithOptions( TestServer: TestServer{ DB: tenantDB, Server: tenantServer, - TestingKnobs: tenantServer.(*server.TestTenant).Cfg.TestingKnobs, + TestingKnobs: *tenantServer.TestingKnobs(), Codec: keys.MakeSQLCodec(tenantID), }, SystemDB: systemDB, diff --git a/pkg/ccl/serverccl/server_controller_test.go b/pkg/ccl/serverccl/server_controller_test.go index cc994ed8a39..c05c3a93ed2 100644 --- a/pkg/ccl/serverccl/server_controller_test.go +++ b/pkg/ccl/serverccl/server_controller_test.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/authserver" "github.com/cockroachdb/cockroach/pkg/server/serverpb" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" 
"github.com/cockroachdb/cockroach/pkg/testutils" @@ -125,32 +124,6 @@ ALTER TENANT application START SERVICE SHARED`) } } -func TestSharedProcessServerInheritsTempStorageLimit(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - const specialSize = 123123123 - - // Start a server with a custom temp storage limit. - ctx := context.Background() - st := cluster.MakeClusterSettings() - s := serverutils.StartServerOnly(t, base.TestServerArgs{ - Settings: st, - TempStorageConfig: base.DefaultTestTempStorageConfigWithSize(st, specialSize), - DefaultTestTenant: base.TestControlsTenantsExplicitly, - }) - defer s.Stopper().Stop(ctx) - - // Start a shared process tenant server. - ts, _, err := s.StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{ - TenantName: "hello", - }) - require.NoError(t, err) - - tss := ts.(*server.TestTenant) - require.Equal(t, int64(specialSize), tss.SQLCfg.TempStorageConfig.Mon.Limit()) -} - func TestServerControllerHTTP(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/ccl/sqlproxyccl/proxy_handler_test.go b/pkg/ccl/sqlproxyccl/proxy_handler_test.go index a52fc5cadff..d4b8ad0f561 100644 --- a/pkg/ccl/sqlproxyccl/proxy_handler_test.go +++ b/pkg/ccl/sqlproxyccl/proxy_handler_test.go @@ -33,7 +33,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/sqlproxyccl/tenantdirsvr" "github.com/cockroachdb/cockroach/pkg/ccl/sqlproxyccl/throttler" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/pgwire" @@ -1933,7 +1932,7 @@ func TestConnectionMigration(t *testing.T) { // Start first SQL pod. tenant1, tenantDB1 := serverutils.StartTenant(t, s, tests.CreateTestTenantParams(tenantID)) - tenant1.(*server.TestTenant).PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) + tenant1.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) defer tenant1.Stopper().Stop(ctx) defer tenantDB1.Close() @@ -1941,7 +1940,7 @@ func TestConnectionMigration(t *testing.T) { params2 := tests.CreateTestTenantParams(tenantID) params2.DisableCreateTenant = true tenant2, tenantDB2 := serverutils.StartTenant(t, s, params2) - tenant2.(*server.TestTenant).PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) + tenant2.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) defer tenant2.Stopper().Stop(ctx) defer tenantDB2.Close() @@ -2989,7 +2988,7 @@ func startTestTenantPodsWithStopper( params.TestingKnobs = knobs params.Stopper = stopper tenant, tenantDB := serverutils.StartTenant(t, ts, params) - tenant.(*server.TestTenant).PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) + tenant.PGPreServer().(*pgwire.PreServeConnHandler).TestingSetTrustClientProvidedRemoteAddr(true) // Create a test user. We only need to do it once. 
if i == 0 { diff --git a/pkg/server/server_controller_test.go b/pkg/server/server_controller_test.go index 3165c929c0e..b391d53b6c0 100644 --- a/pkg/server/server_controller_test.go +++ b/pkg/server/server_controller_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -82,3 +83,29 @@ func TestSQLErrorUponInvalidTenant(t *testing.T) { err = db.Ping() require.Regexp(t, `service unavailable for target tenant \(nonexistent\)`, err.Error()) } + +func TestSharedProcessServerInheritsTempStorageLimit(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + const specialSize = 123123123 + + // Start a server with a custom temp storage limit. + ctx := context.Background() + st := cluster.MakeClusterSettings() + s := serverutils.StartServerOnly(t, base.TestServerArgs{ + Settings: st, + TempStorageConfig: base.DefaultTestTempStorageConfigWithSize(st, specialSize), + DefaultTestTenant: base.TestControlsTenantsExplicitly, + }) + defer s.Stopper().Stop(ctx) + + // Start a shared process tenant server. + ts, _, err := s.StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{ + TenantName: "hello", + }) + require.NoError(t, err) + + tss := ts.(*testTenant) + require.Equal(t, int64(specialSize), tss.SQLCfg.TempStorageConfig.Mon.Limit()) +} diff --git a/pkg/server/tenant.go b/pkg/server/tenant.go index 33cdb605ef1..409ff87294b 100644 --- a/pkg/server/tenant.go +++ b/pkg/server/tenant.go @@ -359,7 +359,7 @@ func newTenantServer( // Instantiate the migration API server. tms := newTenantMigrationServer(sqlServer) serverpb.RegisterMigrationServer(args.grpc.Server, tms) - sqlServer.migrationServer = tms // only for testing via TestTenant + sqlServer.migrationServer = tms // only for testing via testTenant // Tell the authz server how to connect to SQL. adminAuthzCheck.SetAuthzAccessorFactory(func(opName string) (sql.AuthorizationAccessor, func()) { diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 228986d5b99..1764519549b 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -708,12 +708,12 @@ func (ts *testServer) Stop(ctx context.Context) { ts.Server.stopper.Stop(ctx) } -// TestTenant is an in-memory instantiation of the SQL-only process created for -// each active Cockroach tenant. TestTenant provides tests with access to +// testTenant is an in-memory instantiation of the SQL-only process created for +// each active Cockroach tenant. testTenant provides tests with access to // internal methods and state on SQLServer. It is typically started in tests by // calling the TestServerInterface.StartTenant method or by calling the wrapper // serverutils.StartTenant method. -type TestTenant struct { +type testTenant struct { sql *SQLServer Cfg *BaseConfig SQLCfg *SQLConfig @@ -727,50 +727,50 @@ type TestTenant struct { pgPreServer *pgwire.PreServeConnHandler } -var _ serverutils.ApplicationLayerInterface = &TestTenant{} +var _ serverutils.ApplicationLayerInterface = &testTenant{} // AnnotateCtx is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) AnnotateCtx(ctx context.Context) context.Context { +func (t *testTenant) AnnotateCtx(ctx context.Context) context.Context { return t.sql.AnnotateCtx(ctx) } // SQLInstanceID is part of the serverutils.ApplicationLayerInterface. 
-func (t *TestTenant) SQLInstanceID() base.SQLInstanceID { +func (t *testTenant) SQLInstanceID() base.SQLInstanceID { return t.sql.SQLInstanceID() } // AdvRPCAddr is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) AdvRPCAddr() string { +func (t *testTenant) AdvRPCAddr() string { return t.Cfg.AdvertiseAddr } // AdvSQLAddr is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) AdvSQLAddr() string { +func (t *testTenant) AdvSQLAddr() string { return t.Cfg.SQLAdvertiseAddr } // SQLAddr is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SQLAddr() string { +func (t *testTenant) SQLAddr() string { return t.Cfg.SQLAddr } // HTTPAddr is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) HTTPAddr() string { +func (t *testTenant) HTTPAddr() string { return t.Cfg.HTTPAddr } // RPCAddr is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) RPCAddr() string { +func (t *testTenant) RPCAddr() string { return t.Cfg.Addr } // SQLConn is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SQLConn(test serverutils.TestFataler, dbName string) *gosql.DB { +func (t *testTenant) SQLConn(test serverutils.TestFataler, dbName string) *gosql.DB { return t.SQLConnForUser(test, username.RootUser, dbName) } // SQLConnForUser is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SQLConnForUser( +func (t *testTenant) SQLConnForUser( test serverutils.TestFataler, userName, dbName string, ) *gosql.DB { db, err := t.SQLConnForUserE(userName, dbName) @@ -781,12 +781,12 @@ func (t *TestTenant) SQLConnForUser( } // SQLConnE is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SQLConnE(dbName string) (*gosql.DB, error) { +func (t *testTenant) SQLConnE(dbName string) (*gosql.DB, error) { return t.SQLConnForUserE(username.RootUser, dbName) } // SQLConnForUserE is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SQLConnForUserE(userName string, dbName string) (*gosql.DB, error) { +func (t *testTenant) SQLConnForUserE(userName string, dbName string) (*gosql.DB, error) { return openTestSQLConn(userName, dbName, t.Stopper(), t.pgL, t.Cfg.SQLAdvertiseAddr, @@ -795,18 +795,18 @@ func (t *TestTenant) SQLConnForUserE(userName string, dbName string) (*gosql.DB, } // DB is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) DB() *kv.DB { +func (t *testTenant) DB() *kv.DB { return t.sql.execCfg.DB } // PGServer is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) PGServer() interface{} { +func (t *testTenant) PGServer() interface{} { return t.sql.pgServer } // PGPreServer exposes the pgwire.PreServeConnHandler instance used by // the testServer. -func (ts *TestTenant) PGPreServer() interface{} { +func (ts *testTenant) PGPreServer() interface{} { if ts != nil { return ts.pgPreServer } @@ -814,155 +814,155 @@ func (ts *TestTenant) PGPreServer() interface{} { } // DiagnosticsReporter is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) DiagnosticsReporter() interface{} { +func (t *testTenant) DiagnosticsReporter() interface{} { return t.sql.diagnosticsReporter } // StatusServer is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) StatusServer() interface{} { +func (t *testTenant) StatusServer() interface{} { return t.t.status } // TenantStatusServer is part of the serverutils.ApplicationLayerInterface. 
-func (t *TestTenant) TenantStatusServer() interface{} { +func (t *testTenant) TenantStatusServer() interface{} { return t.t.status } // SQLServer is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SQLServer() interface{} { +func (t *testTenant) SQLServer() interface{} { return t.sql.pgServer.SQLServer } // DistSQLServer is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) DistSQLServer() interface{} { +func (t *testTenant) DistSQLServer() interface{} { return t.sql.distSQLServer } // SetDistSQLSpanResolver is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SetDistSQLSpanResolver(spanResolver interface{}) { +func (t *testTenant) SetDistSQLSpanResolver(spanResolver interface{}) { t.sql.execCfg.DistSQLPlanner.SetSpanResolver(spanResolver.(physicalplan.SpanResolver)) } // DistSenderI is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) DistSenderI() interface{} { +func (t *testTenant) DistSenderI() interface{} { return t.sql.execCfg.DistSender } // InternalDB is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) InternalDB() interface{} { +func (t *testTenant) InternalDB() interface{} { return t.sql.internalDB } // LeaseManager is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) LeaseManager() interface{} { +func (t *testTenant) LeaseManager() interface{} { return t.sql.leaseMgr } // InternalExecutor is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) InternalExecutor() interface{} { +func (t *testTenant) InternalExecutor() interface{} { return t.sql.internalExecutor } // RPCContext is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) RPCContext() *rpc.Context { +func (t *testTenant) RPCContext() *rpc.Context { return t.sql.execCfg.RPCContext } // JobRegistry is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) JobRegistry() interface{} { +func (t *testTenant) JobRegistry() interface{} { return t.sql.jobRegistry } // ExecutorConfig is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) ExecutorConfig() interface{} { +func (t *testTenant) ExecutorConfig() interface{} { return *t.sql.execCfg } // RangeFeedFactory is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) RangeFeedFactory() interface{} { +func (t *testTenant) RangeFeedFactory() interface{} { return t.sql.execCfg.RangeFeedFactory } // ClusterSettings is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) ClusterSettings() *cluster.Settings { +func (t *testTenant) ClusterSettings() *cluster.Settings { return t.Cfg.Settings } // Stopper is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) Stopper() *stop.Stopper { +func (t *testTenant) Stopper() *stop.Stopper { return t.sql.stopper } // Clock is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) Clock() *hlc.Clock { +func (t *testTenant) Clock() *hlc.Clock { return t.sql.execCfg.Clock } // AmbientCtx implements serverutils.ApplicationLayerInterface. This // retrieves the ambient context for this server. This is intended for // exclusive use by test code. -func (t *TestTenant) AmbientCtx() log.AmbientContext { +func (t *testTenant) AmbientCtx() log.AmbientContext { return t.Cfg.AmbientCtx } // TestingKnobs is part of the serverutils.ApplicationLayerInterface. 
-func (t *TestTenant) TestingKnobs() *base.TestingKnobs { +func (t *testTenant) TestingKnobs() *base.TestingKnobs { return &t.Cfg.TestingKnobs } // SQLServerInternal is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SQLServerInternal() interface{} { +func (t *testTenant) SQLServerInternal() interface{} { return t.sql } // SpanConfigKVAccessor is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SpanConfigKVAccessor() interface{} { +func (t *testTenant) SpanConfigKVAccessor() interface{} { return t.sql.tenantConnect } // SpanConfigReporter is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SpanConfigReporter() interface{} { +func (t *testTenant) SpanConfigReporter() interface{} { return t.sql.tenantConnect } // SpanConfigReconciler is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SpanConfigReconciler() interface{} { +func (t *testTenant) SpanConfigReconciler() interface{} { return t.sql.spanconfigMgr.Reconciler } // SpanConfigSQLTranslatorFactory is part of the // serverutils.ApplicationLayerInterface. -func (t *TestTenant) SpanConfigSQLTranslatorFactory() interface{} { +func (t *testTenant) SpanConfigSQLTranslatorFactory() interface{} { return t.sql.spanconfigSQLTranslatorFactory } // SpanConfigSQLWatcher is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SpanConfigSQLWatcher() interface{} { +func (t *testTenant) SpanConfigSQLWatcher() interface{} { return t.sql.spanconfigSQLWatcher } // SystemConfigProvider is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SystemConfigProvider() config.SystemConfigProvider { +func (t *testTenant) SystemConfigProvider() config.SystemConfigProvider { return t.sql.systemConfigWatcher } // DrainClients exports the drainClients() method for use by tests. -func (t *TestTenant) DrainClients(ctx context.Context) error { +func (t *testTenant) DrainClients(ctx context.Context) error { return t.drain.drainClients(ctx, nil /* reporter */) } // Readiness is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) Readiness(ctx context.Context) error { +func (t *testTenant) Readiness(ctx context.Context) error { return t.t.admin.checkReadinessForHealthCheck(ctx) } // MustGetSQLCounter implements the serverutils.ApplicationLayerInterface. -func (t *TestTenant) MustGetSQLCounter(name string) int64 { +func (t *testTenant) MustGetSQLCounter(name string) int64 { return mustGetSQLCounterForRegistry(t.sql.metricsRegistry, name) } // MustGetSQLNetworkCounter implements the serverutils.ApplicationLayerInterface. -func (t *TestTenant) MustGetSQLNetworkCounter(name string) int64 { +func (t *testTenant) MustGetSQLNetworkCounter(name string) int64 { reg := metric.NewRegistry() for _, m := range t.sql.pgServer.Metrics() { reg.AddMetricStruct(m) @@ -971,39 +971,39 @@ func (t *TestTenant) MustGetSQLNetworkCounter(name string) int64 { } // RangeDescIteratorFactory implements the serverutils.ApplicationLayerInterface. -func (t *TestTenant) RangeDescIteratorFactory() interface{} { +func (t *testTenant) RangeDescIteratorFactory() interface{} { return t.sql.execCfg.RangeDescIteratorFactory } // Codec is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) Codec() keys.SQLCodec { +func (t *testTenant) Codec() keys.SQLCodec { return t.sql.execCfg.Codec } // Tracer is part of the serverutils.ApplicationLayerInterface. 
-func (t *TestTenant) Tracer() *tracing.Tracer { +func (t *testTenant) Tracer() *tracing.Tracer { return t.sql.ambientCtx.Tracer } // TracerI is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) TracerI() interface{} { +func (t *testTenant) TracerI() interface{} { return t.Tracer() } // ForceTableGC is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) ForceTableGC( +func (t *testTenant) ForceTableGC( ctx context.Context, database, table string, timestamp hlc.Timestamp, ) error { return internalForceTableGC(ctx, t, database, table, timestamp) } // DefaultZoneConfig is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) DefaultZoneConfig() zonepb.ZoneConfig { +func (t *testTenant) DefaultZoneConfig() zonepb.ZoneConfig { return *t.SystemConfigProvider().GetSystemConfig().DefaultZoneConfig } // SettingsWatcher is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SettingsWatcher() interface{} { +func (t *testTenant) SettingsWatcher() interface{} { return t.sql.settingsWatcher } @@ -1109,7 +1109,7 @@ func (ts *testServer) StartSharedProcessTenant( hts.t.admin = sqlServerWrapper.tenantAdmin hts.t.status = sqlServerWrapper.tenantStatus - testTenant := &TestTenant{ + tt := &testTenant{ sql: sqlServer, Cfg: sqlServer.cfg, SQLCfg: sqlServerWrapper.sqlCfg, @@ -1123,7 +1123,7 @@ func (ts *testServer) StartSharedProcessTenant( if err != nil { return nil, nil, err } - return testTenant, sqlDB, err + return tt, sqlDB, err } // DisableStartTenant is part of the serverutils.TenantControlInterface. @@ -1132,58 +1132,58 @@ func (ts *testServer) DisableStartTenant(reason error) { } // MigrationServer is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) MigrationServer() interface{} { +func (t *testTenant) MigrationServer() interface{} { return t.sql.migrationServer } // CollectionFactory is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) CollectionFactory() interface{} { +func (t *testTenant) CollectionFactory() interface{} { return t.sql.execCfg.CollectionFactory } // SystemTableIDResolver is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SystemTableIDResolver() interface{} { +func (t *testTenant) SystemTableIDResolver() interface{} { return t.sql.execCfg.SystemTableIDResolver } // QueryDatabaseID is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) QueryDatabaseID( +func (t *testTenant) QueryDatabaseID( ctx context.Context, userName username.SQLUsername, dbName string, ) (descpb.ID, error) { return t.t.admin.queryDatabaseID(ctx, userName, dbName) } // QueryTableID is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) QueryTableID( +func (t *testTenant) QueryTableID( ctx context.Context, userName username.SQLUsername, dbName, tbName string, ) (descpb.ID, error) { return t.t.admin.queryTableID(ctx, userName, dbName, tbName) } // StatsForSpans is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) StatsForSpan( +func (t *testTenant) StatsForSpan( ctx context.Context, span roachpb.Span, ) (*serverpb.TableStatsResponse, error) { return t.t.admin.statsForSpan(ctx, span) } // SetReady is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) SetReady(ready bool) { +func (t *testTenant) SetReady(ready bool) { t.sql.isReady.Set(ready) } // SetAcceptSQLWithoutTLS is part of the serverutils.ApplicationLayerInterface. 
-func (t *TestTenant) SetAcceptSQLWithoutTLS(accept bool) { +func (t *testTenant) SetAcceptSQLWithoutTLS(accept bool) { t.Cfg.AcceptSQLWithoutTLS = accept } // PrivilegeChecker is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) PrivilegeChecker() interface{} { +func (t *testTenant) PrivilegeChecker() interface{} { return t.t.admin.privilegeChecker } // HTTPAuthServer is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) HTTPAuthServer() interface{} { +func (t *testTenant) HTTPAuthServer() interface{} { return t.t.authentication } @@ -1514,7 +1514,7 @@ func (ts *testServer) StartTenant( hts.t.admin = sw.tenantAdmin hts.t.status = sw.tenantStatus - return &TestTenant{ + return &testTenant{ sql: sw.sqlServer, Cfg: &baseCfg, SQLCfg: &sqlCfg, @@ -2326,7 +2326,7 @@ func (ts *testServer) GetStatusClient(test serverutils.TestFataler) serverpb.Sta } // NewClientRPCContext is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) NewClientRPCContext( +func (t *testTenant) NewClientRPCContext( ctx context.Context, user username.SQLUsername, ) *rpc.Context { return newClientRPCContext(ctx, user, @@ -2337,7 +2337,7 @@ func (t *TestTenant) NewClientRPCContext( } // RPCClientConn is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) RPCClientConn( +func (t *testTenant) RPCClientConn( test serverutils.TestFataler, user username.SQLUsername, ) *grpc.ClientConn { conn, err := t.RPCClientConnE(user) @@ -2348,20 +2348,20 @@ func (t *TestTenant) RPCClientConn( } // RPCClientConnE is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) RPCClientConnE(user username.SQLUsername) (*grpc.ClientConn, error) { +func (t *testTenant) RPCClientConnE(user username.SQLUsername) (*grpc.ClientConn, error) { ctx := context.Background() rpcCtx := t.NewClientRPCContext(ctx, user) return rpcCtx.GRPCDialPod(t.AdvRPCAddr(), t.SQLInstanceID(), rpc.DefaultClass).Connect(ctx) } // GetAdminClient is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) GetAdminClient(test serverutils.TestFataler) serverpb.AdminClient { +func (t *testTenant) GetAdminClient(test serverutils.TestFataler) serverpb.AdminClient { conn := t.RPCClientConn(test, username.RootUserName()) return serverpb.NewAdminClient(conn) } // GetStatusClient is part of the serverutils.ApplicationLayerInterface. -func (t *TestTenant) GetStatusClient(test serverutils.TestFataler) serverpb.StatusClient { +func (t *testTenant) GetStatusClient(test serverutils.TestFataler) serverpb.StatusClient { conn := t.RPCClientConn(test, username.RootUserName()) return serverpb.NewStatusClient(conn) } diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index 9b5b28daba5..58efa90dc41 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -624,7 +624,7 @@ func TestLint(t *testing.T) { "git", "grep", "-nE", - `\*testServer`, + `\*(testServer|testTenant)`, "--", "server/*_test.go", ":!server/server_special_test.go",