chore: rename Coordinator to CoordinatorV1 (#11222)

Renames the existing tailnet.Coordinator interface to tailnet.CoordinatorV1 and redefines tailnet.Coordinator as the combination of the v1 and v2 APIs, so that the combined interface can be used for the main atomic pointer. The separate NewCoordinatorV2 and NewPGCoordV2 constructors are folded into NewCoordinator and NewPGCoord.

Part of #10532
Author: Spike Curtis
Date: 2023-12-15 11:38:12 +04:00 (committed by GitHub)
parent 545cb9a7cc
commit ad3fed72bc
9 changed files with 37 additions and 29 deletions
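For context on the "main atomic pointer" mentioned above, here is a minimal sketch of the intended usage, assuming the coordinator is held behind sync/atomic's generic Pointer; the variable and swap shown are illustrative, not the actual coderd wiring:

package main

import (
	"sync/atomic"

	"cdr.dev/slog"

	"github.com/coder/coder/v2/tailnet"
)

func main() {
	// The pointer holds the combined interface, so whatever implementation is
	// stored can be used through either the v1 or the v2 API surface.
	var coord atomic.Pointer[tailnet.Coordinator]

	// The in-memory coordinator satisfies Coordinator (CoordinatorV1 + CoordinatorV2).
	c := tailnet.NewCoordinator(slog.Make())
	coord.Store(&c)

	// A replacement implementation (e.g. an enterprise coordinator) can later
	// be swapped in atomically; readers always Load() the current one.
	current := *coord.Load()
	_ = current
}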

View File

@@ -24,7 +24,7 @@ func NewClient(t testing.TB,
 	agentID uuid.UUID,
 	manifest agentsdk.Manifest,
 	statsChan chan *agentsdk.Stats,
-	coordinator tailnet.Coordinator,
+	coordinator tailnet.CoordinatorV1,
 ) *Client {
 	if manifest.AgentID == uuid.Nil {
 		manifest.AgentID = agentID
@@ -47,7 +47,7 @@ type Client struct {
 	manifest             agentsdk.Manifest
 	metadata             map[string]agentsdk.Metadata
 	statsChan            chan *agentsdk.Stats
-	coordinator          tailnet.Coordinator
+	coordinator          tailnet.CoordinatorV1
 	LastWorkspaceAgent   func()
 	PatchWorkspaceLogs   func() error
 	GetServiceBannerFunc func() (codersdk.ServiceBannerConfig, error)

View File

@@ -847,7 +847,7 @@ func (r *RootCmd) Server(newAPI func(context.Context, *coderd.Options) (*coderd.
 	defer closeBatcher()
 	// We use a separate coderAPICloser so the Enterprise API
-	// can have it's own close functions. This is cleaner
+	// can have its own close functions. This is cleaner
 	// than abstracting the Coder API itself.
 	coderAPI, coderAPICloser, err := newAPI(ctx, options)
 	if err != nil {

View File

@@ -1508,7 +1508,7 @@ func convertWorkspaceAgentMetadataDesc(mds []database.WorkspaceAgentMetadatum) [
 	return metadata
 }
 
-func convertWorkspaceAgent(derpMap *tailcfg.DERPMap, coordinator tailnet.Coordinator,
+func convertWorkspaceAgent(derpMap *tailcfg.DERPMap, coordinator tailnet.CoordinatorV1,
 	dbAgent database.WorkspaceAgent, apps []codersdk.WorkspaceApp, scripts []codersdk.WorkspaceAgentScript, logSources []codersdk.WorkspaceAgentLogSource,
 	agentInactiveDisconnectTimeout time.Duration, agentFallbackTroubleshootingURL string,
 ) (codersdk.WorkspaceAgent, error) {

View File

@@ -215,7 +215,7 @@ type client struct {
 	t           *testing.T
 	agentID     uuid.UUID
 	manifest    agentsdk.Manifest
-	coordinator tailnet.Coordinator
+	coordinator tailnet.CoordinatorV1
 }
 
 func (c *client) Manifest(_ context.Context) (agentsdk.Manifest, error) {

View File

@@ -23,6 +23,7 @@ import (
 	"github.com/coder/coder/v2/coderd/util/slice"
 	"github.com/coder/coder/v2/codersdk"
 	agpl "github.com/coder/coder/v2/tailnet"
+	"github.com/coder/coder/v2/tailnet/proto"
 )
 
 // NewCoordinator creates a new high availability coordinator
@@ -156,6 +157,24 @@ type haCoordinator struct {
 	legacyAgents map[uuid.UUID]struct{}
 }
 
+func (c *haCoordinator) Coordinate(ctx context.Context, _ uuid.UUID, _ string, _ agpl.TunnelAuth) (chan<- *proto.CoordinateRequest, <-chan *proto.CoordinateResponse) {
+	// HA Coordinator does NOT support v2 API and this is just here to appease the compiler and prevent
+	// panics while we build out v2 support elsewhere. We will retire the HA Coordinator in favor of
+	// PG Coordinator before we turn on the v2 API.
+	c.log.Warn(ctx, "v2 API invoked but unimplemented")
+	resp := make(chan *proto.CoordinateResponse)
+	close(resp)
+	req := make(chan *proto.CoordinateRequest)
+	go func() {
+		for {
+			if _, ok := <-req; !ok {
+				return
+			}
+		}
+	}()
+	return req, resp
+}
+
 // Node returns an in-memory node by ID.
 func (c *haCoordinator) Node(id uuid.UUID) *agpl.Node {
 	c.mutex.Lock()
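The stub added above follows a common channel idiom: return an already-closed response channel so receives fail fast, and drain the request channel so senders never block. A self-contained sketch of the same pattern, with hypothetical request/response types standing in for the proto messages:

package main

import "fmt"

type request struct{}
type response struct{}

// stubCoordinate mirrors the haCoordinator stub: callers get a response
// channel that is already closed and a request channel that is silently
// drained until they close it.
func stubCoordinate() (chan<- request, <-chan response) {
	resp := make(chan response)
	close(resp) // every receive returns immediately with ok == false
	req := make(chan request)
	go func() {
		for range req { // discard requests so sends never block
		}
	}()
	return req, resp
}

func main() {
	req, resp := stubCoordinate()
	req <- request{} // accepted and discarded
	if _, ok := <-resp; !ok {
		fmt.Println("response channel closed: v2 API not supported")
	}
	close(req) // lets the drain goroutine exit
}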

View File

@@ -149,12 +149,6 @@ func newPGCoordInternal(
 	return c, nil
 }
 
-// NewPGCoordV2 creates a high-availability coordinator that stores state in the PostgreSQL database and
-// receives notifications of updates via the pubsub.
-func NewPGCoordV2(ctx context.Context, logger slog.Logger, ps pubsub.Pubsub, store database.Store) (agpl.CoordinatorV2, error) {
-	return newPGCoordInternal(ctx, logger, ps, store)
-}
-
 func (c *pgCoord) ServeMultiAgent(id uuid.UUID) agpl.MultiAgentConn {
 	return agpl.ServeMultiAgent(c, c.logger, id)
 }

View File

@@ -611,7 +611,7 @@ func TestPGCoordinator_BidirectionalTunnels(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
 	defer cancel()
 	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
-	coordinator, err := tailnet.NewPGCoordV2(ctx, logger, ps, store)
+	coordinator, err := tailnet.NewPGCoord(ctx, logger, ps, store)
 	require.NoError(t, err)
 	defer coordinator.Close()
 	agpltest.BidirectionalTunnels(ctx, t, coordinator)
@@ -626,7 +626,7 @@ func TestPGCoordinator_GracefulDisconnect(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
 	defer cancel()
 	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
-	coordinator, err := tailnet.NewPGCoordV2(ctx, logger, ps, store)
+	coordinator, err := tailnet.NewPGCoord(ctx, logger, ps, store)
 	require.NoError(t, err)
 	defer coordinator.Close()
 	agpltest.GracefulDisconnectTest(ctx, t, coordinator)
@@ -641,7 +641,7 @@ func TestPGCoordinator_Lost(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitSuperLong)
 	defer cancel()
 	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
-	coordinator, err := tailnet.NewPGCoordV2(ctx, logger, ps, store)
+	coordinator, err := tailnet.NewPGCoord(ctx, logger, ps, store)
 	require.NoError(t, err)
 	defer coordinator.Close()
 	agpltest.LostTest(ctx, t, coordinator)
@@ -676,7 +676,7 @@ func newTestConn(ids []uuid.UUID) *testConn {
 	return a
 }
 
-func newTestAgent(t *testing.T, coord agpl.Coordinator, name string, id ...uuid.UUID) *testConn {
+func newTestAgent(t *testing.T, coord agpl.CoordinatorV1, name string, id ...uuid.UUID) *testConn {
 	a := newTestConn(id)
 	go func() {
 		err := coord.ServeAgent(a.serverWS, a.id, name)
@@ -731,7 +731,7 @@ func (c *testConn) waitForClose(ctx context.Context, t *testing.T) {
 	}
 }
 
-func newTestClient(t *testing.T, coord agpl.Coordinator, agentID uuid.UUID, id ...uuid.UUID) *testConn {
+func newTestClient(t *testing.T, coord agpl.CoordinatorV1, agentID uuid.UUID, id ...uuid.UUID) *testConn {
 	c := newTestConn(id)
 	go func() {
 		err := coord.ServeClient(c.serverWS, c.id, agentID)

View File

@@ -27,6 +27,11 @@ import (
 // └──────────────────┘ └────────────────────┘ └───────────────────┘ └──────────────────┘
 // Coordinators have different guarantees for HA support.
 type Coordinator interface {
+	CoordinatorV1
+	CoordinatorV2
+}
+
+type CoordinatorV1 interface {
 	// ServeHTTPDebug serves a debug webpage that shows the internal state of
 	// the coordinator.
 	ServeHTTPDebug(w http.ResponseWriter, r *http.Request)
@@ -143,16 +148,6 @@ func NewCoordinator(logger slog.Logger) Coordinator {
 	}
 }
 
-// NewCoordinatorV2 constructs a new in-memory connection coordinator. This
-// coordinator is incompatible with multiple Coder replicas as all node data is
-// in-memory.
-func NewCoordinatorV2(logger slog.Logger) CoordinatorV2 {
-	return &coordinator{
-		core:       newCore(logger.Named(LoggerName)),
-		closedChan: make(chan struct{}),
-	}
-}
-
 // coordinator exchanges nodes with agents to establish connections entirely in-memory.
 // The Enterprise implementation provides this for high-availability.
 // ┌──────────────────┐ ┌────────────────────┐ ┌───────────────────┐ ┌──────────────────┐
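The first hunk above is the heart of the change: Coordinator is now the embedding of CoordinatorV1 and CoordinatorV2, so only types implementing both satisfy it, while call sites renamed in this commit can keep accepting the narrower CoordinatorV1. A toy illustration of that embedding, with hypothetical interface names rather than the real tailnet ones:

package main

import "fmt"

type V1 interface{ ServeV1() }
type V2 interface{ ServeV2() }

// Combined plays the role of Coordinator: it is satisfied only by
// implementations of both embedded interfaces.
type Combined interface {
	V1
	V2
}

type impl struct{}

func (impl) ServeV1() { fmt.Println("served via v1") }
func (impl) ServeV2() { fmt.Println("served via v2") }

// takesV1 stands in for call sites that now accept CoordinatorV1:
// any Combined value can still be passed to them.
func takesV1(c V1) { c.ServeV1() }

func main() {
	var c Combined = impl{}
	takesV1(c)
	c.ServeV2()
}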

View File

@@ -357,7 +357,7 @@ func TestCoordinator_AgentUpdateWhileClientConnects(t *testing.T) {
 func TestCoordinator_BidirectionalTunnels(t *testing.T) {
 	t.Parallel()
 	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
-	coordinator := tailnet.NewCoordinatorV2(logger)
+	coordinator := tailnet.NewCoordinator(logger)
 	ctx := testutil.Context(t, testutil.WaitShort)
 	test.BidirectionalTunnels(ctx, t, coordinator)
 }
@@ -365,7 +365,7 @@ func TestCoordinator_BidirectionalTunnels(t *testing.T) {
 func TestCoordinator_GracefulDisconnect(t *testing.T) {
 	t.Parallel()
 	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
-	coordinator := tailnet.NewCoordinatorV2(logger)
+	coordinator := tailnet.NewCoordinator(logger)
 	ctx := testutil.Context(t, testutil.WaitShort)
 	test.GracefulDisconnectTest(ctx, t, coordinator)
 }
@@ -373,7 +373,7 @@ func TestCoordinator_GracefulDisconnect(t *testing.T) {
 func TestCoordinator_Lost(t *testing.T) {
 	t.Parallel()
 	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
-	coordinator := tailnet.NewCoordinatorV2(logger)
+	coordinator := tailnet.NewCoordinator(logger)
 	ctx := testutil.Context(t, testutil.WaitShort)
 	test.LostTest(ctx, t, coordinator)
 }