mirror of https://github.com/coder/coder.git, synced 2025-07-12 00:14:10 +00:00
Closes #14716
Closes #14717

Adds a new user-scoped tailnet API endpoint (`api/v2/tailnet`) with a new RPC stream for receiving updates on workspaces owned by a specific user, as defined in #14716. When a stream is started, the `WorkspaceUpdatesProvider` begins listening on the user-scoped pubsub events implemented in #14964. When a relevant event type is seen (such as a workspace state transition), the provider queries the DB for all workspaces (and agents) owned by the user, and diffs the result against the previous query to produce a set of workspace updates. Workspace updates can be requested for any user ID; however, only workspaces the authorised user is permitted to `ActionRead` will have their updates streamed. Opening a tunnel to an agent requires that the user can perform `ActionSSH` against the workspace containing it.
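For illustration, here is a minimal sketch of consuming the stream from the client side of the DRPC API. `watchWorkspaces` is a hypothetical helper, and the `client` and `ownerID` values are assumed to come from an already-established session (imports and error handling abbreviated):

```go
// watchWorkspaces subscribes to updates for ownerID's workspaces and logs
// each diff until the stream or context ends.
func watchWorkspaces(ctx context.Context, client proto.DRPCTailnetClient, ownerID uuid.UUID) error {
	// Only workspaces the caller is permitted to ActionRead appear here.
	stream, err := client.WorkspaceUpdates(ctx, &proto.WorkspaceUpdatesRequest{
		WorkspaceOwnerId: tailnet.UUIDToByteSlice(ownerID),
	})
	if err != nil {
		return err
	}
	defer stream.Close()

	for {
		// Each message is a diff against the previous DB query, not a
		// full snapshot: apply upserts and deletes to local state.
		update, err := stream.Recv()
		if err != nil {
			return err // stream closed or context canceled
		}
		for _, ws := range update.GetUpsertedWorkspaces() {
			log.Printf("workspace %q is now %s", ws.GetName(), ws.GetStatus())
		}
		for _, ws := range update.GetDeletedWorkspaces() {
			log.Printf("workspace %q was deleted", ws.GetName())
		}
	}
}
```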
396 lines
11 KiB
Go
package tailnet_test

import (
	"context"
	"io"
	"net"
	"sync/atomic"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/xerrors"
	"tailscale.com/tailcfg"

	"cdr.dev/slog"
	"cdr.dev/slog/sloggers/slogtest"
	"github.com/coder/coder/v2/tailnet"
	"github.com/coder/coder/v2/tailnet/proto"
	"github.com/coder/coder/v2/tailnet/tailnettest"
	"github.com/coder/coder/v2/testutil"
	"github.com/coder/quartz"
)
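
// TestClientService_ServeClient_V2 exercises a v2.0 DRPC session end to end
// over an in-memory pipe: Coordinate, StreamDERPMaps, and PostTelemetry.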
func TestClientService_ServeClient_V2(t *testing.T) {
	t.Parallel()
	fCoord := tailnettest.NewFakeCoordinator()
	var coord tailnet.Coordinator = fCoord
	coordPtr := atomic.Pointer[tailnet.Coordinator]{}
	coordPtr.Store(&coord)
	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
	derpMap := &tailcfg.DERPMap{Regions: map[int]*tailcfg.DERPRegion{999: {RegionCode: "test"}}}

	telemetryEvents := make(chan []*proto.TelemetryEvent, 64)
	uut, err := tailnet.NewClientService(tailnet.ClientServiceOptions{
		Logger:                 logger,
		CoordPtr:               &coordPtr,
		DERPMapUpdateFrequency: time.Millisecond,
		DERPMapFn:              func() *tailcfg.DERPMap { return derpMap },
		NetworkTelemetryHandler: func(batch []*proto.TelemetryEvent) {
			telemetryEvents <- batch
		},
		ResumeTokenProvider: tailnet.NewInsecureTestResumeTokenProvider(),
	})
	require.NoError(t, err)

	ctx := testutil.Context(t, testutil.WaitShort)
	c, s := net.Pipe()
	defer c.Close()
	defer s.Close()
	clientID := uuid.MustParse("10000001-0000-0000-0000-000000000000")
	agentID := uuid.MustParse("20000001-0000-0000-0000-000000000000")
	errCh := make(chan error, 1)
	go func() {
		err := uut.ServeClient(ctx, "2.0", s, tailnet.StreamID{
			Name: "client",
			ID:   clientID,
			Auth: tailnet.ClientCoordinateeAuth{
				AgentID: agentID,
			},
		})
		t.Logf("ServeClient returned; err=%v", err)
		errCh <- err
	}()

	client, err := tailnet.NewDRPCClient(c, logger)
	require.NoError(t, err)

	// Coordinate
	stream, err := client.Coordinate(ctx)
	require.NoError(t, err)
	defer stream.Close()

	err = stream.Send(&proto.CoordinateRequest{
		UpdateSelf: &proto.CoordinateRequest_UpdateSelf{Node: &proto.Node{PreferredDerp: 11}},
	})
	require.NoError(t, err)

	call := testutil.RequireRecvCtx(ctx, t, fCoord.CoordinateCalls)
	require.NotNil(t, call)
	require.Equal(t, call.ID, clientID)
	require.Equal(t, call.Name, "client")
	require.NoError(t, call.Auth.Authorize(ctx, &proto.CoordinateRequest{
		AddTunnel: &proto.CoordinateRequest_Tunnel{Id: agentID[:]},
	}))
	req := testutil.RequireRecvCtx(ctx, t, call.Reqs)
	require.Equal(t, int32(11), req.GetUpdateSelf().GetNode().GetPreferredDerp())

	call.Resps <- &proto.CoordinateResponse{PeerUpdates: []*proto.CoordinateResponse_PeerUpdate{
		{
			Kind: proto.CoordinateResponse_PeerUpdate_NODE,
			Node: &proto.Node{PreferredDerp: 22},
			Id:   agentID[:],
		},
	}}
	resp, err := stream.Recv()
	require.NoError(t, err)
	u := resp.GetPeerUpdates()
	require.Len(t, u, 1)
	require.Equal(t, int32(22), u[0].GetNode().GetPreferredDerp())

	err = stream.Close()
	require.NoError(t, err)

	// DERP Map
	dms, err := client.StreamDERPMaps(ctx, &proto.StreamDERPMapsRequest{})
	require.NoError(t, err)

	gotDerpMap, err := dms.Recv()
	require.NoError(t, err)
	require.Equal(t, "test", gotDerpMap.GetRegions()[999].GetRegionCode())
	err = dms.Close()
	require.NoError(t, err)

	// PostTelemetry
	telemetryReq := &proto.TelemetryRequest{
		Events: []*proto.TelemetryEvent{
			{
				Id: []byte("hi"),
			},
			{
				Id: []byte("bye"),
			},
		},
	}
	res, err := client.PostTelemetry(ctx, telemetryReq)
	require.NoError(t, err)
	require.NotNil(t, res)
	gotEvents := testutil.RequireRecvCtx(ctx, t, telemetryEvents)
	require.Len(t, gotEvents, 2)
	require.Equal(t, "hi", string(gotEvents[0].Id))
	require.Equal(t, "bye", string(gotEvents[1].Id))

	// RPCs closed; we need to close the Conn to end the session.
	err = c.Close()
	require.NoError(t, err)
	err = testutil.RequireRecvCtx(ctx, t, errCh)
	require.True(t, xerrors.Is(err, io.EOF) || xerrors.Is(err, io.ErrClosedPipe))
}
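
// TestClientService_ServeClient_V1 verifies that a client advertising the
// legacy 1.0 protocol is rejected with ErrUnsupportedVersion.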
func TestClientService_ServeClient_V1(t *testing.T) {
	t.Parallel()
	fCoord := tailnettest.NewFakeCoordinator()
	var coord tailnet.Coordinator = fCoord
	coordPtr := atomic.Pointer[tailnet.Coordinator]{}
	coordPtr.Store(&coord)
	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
	uut, err := tailnet.NewClientService(tailnet.ClientServiceOptions{
		Logger:                  logger,
		CoordPtr:                &coordPtr,
		DERPMapUpdateFrequency:  0,
		DERPMapFn:               nil,
		NetworkTelemetryHandler: nil,
		ResumeTokenProvider:     tailnet.NewInsecureTestResumeTokenProvider(),
	})
	require.NoError(t, err)

	ctx := testutil.Context(t, testutil.WaitShort)
	c, s := net.Pipe()
	defer c.Close()
	defer s.Close()
	clientID := uuid.MustParse("10000001-0000-0000-0000-000000000000")
	agentID := uuid.MustParse("20000001-0000-0000-0000-000000000000")
	errCh := make(chan error, 1)
	go func() {
		err := uut.ServeClient(ctx, "1.0", s, tailnet.StreamID{
			Name: "client",
			ID:   clientID,
			Auth: tailnet.ClientCoordinateeAuth{
				AgentID: agentID,
			},
		})
		t.Logf("ServeClient returned; err=%v", err)
		errCh <- err
	}()

	err = testutil.RequireRecvCtx(ctx, t, errCh)
	require.ErrorIs(t, err, tailnet.ErrUnsupportedVersion)
}
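
// TestNetworkTelemetryBatcher covers the three flush paths of the batcher:
// reaching the max batch size, the (mocked) ticker firing, and Close.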
func TestNetworkTelemetryBatcher(t *testing.T) {
	t.Parallel()

	var (
		events = make(chan []*proto.TelemetryEvent, 64)
		mClock = quartz.NewMock(t)
		b      = tailnet.NewNetworkTelemetryBatcher(mClock, time.Millisecond, 3, func(batch []*proto.TelemetryEvent) {
			assert.LessOrEqual(t, len(batch), 3)
			events <- batch
		})
	)

	b.Handler([]*proto.TelemetryEvent{
		{Id: []byte("1")},
		{Id: []byte("2")},
	})
	b.Handler([]*proto.TelemetryEvent{
		{Id: []byte("3")},
		{Id: []byte("4")},
	})

	// Should overflow and send a batch.
	ctx := testutil.Context(t, testutil.WaitShort)
	batch := testutil.RequireRecvCtx(ctx, t, events)
	require.Len(t, batch, 3)
	require.Equal(t, "1", string(batch[0].Id))
	require.Equal(t, "2", string(batch[1].Id))
	require.Equal(t, "3", string(batch[2].Id))

	// Should send any pending events when the ticker fires.
	mClock.Advance(time.Millisecond)
	batch = testutil.RequireRecvCtx(ctx, t, events)
	require.Len(t, batch, 1)
	require.Equal(t, "4", string(batch[0].Id))

	// Should send any pending events when closed.
	b.Handler([]*proto.TelemetryEvent{
		{Id: []byte("5")},
		{Id: []byte("6")},
	})
	err := b.Close()
	require.NoError(t, err)
	batch = testutil.RequireRecvCtx(ctx, t, events)
	require.Len(t, batch, 2)
	require.Equal(t, "5", string(batch[0].Id))
	require.Equal(t, "6", string(batch[1].Id))
}
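
// TestClientUserCoordinateeAuth verifies that tunnel authorization is
// delegated to the TunnelAuthorizer: the fake policy permits agentID (first
// byte 0x01) and rejects agentID2.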
func TestClientUserCoordinateeAuth(t *testing.T) {
	t.Parallel()

	ctx := testutil.Context(t, testutil.WaitShort)

	agentID := uuid.UUID{0x01}
	agentID2 := uuid.UUID{0x02}
	clientID := uuid.UUID{0x03}

	updatesCh := make(chan *proto.WorkspaceUpdate, 1)
	updatesProvider := &fakeUpdatesProvider{ch: updatesCh}

	fCoord, client := createUpdateService(t, ctx, clientID, updatesProvider)

	// Coordinate
	stream, err := client.Coordinate(ctx)
	require.NoError(t, err)
	defer stream.Close()

	err = stream.Send(&proto.CoordinateRequest{
		UpdateSelf: &proto.CoordinateRequest_UpdateSelf{Node: &proto.Node{PreferredDerp: 11}},
	})
	require.NoError(t, err)

	call := testutil.RequireRecvCtx(ctx, t, fCoord.CoordinateCalls)
	require.NotNil(t, call)
	require.Equal(t, call.ID, clientID)
	require.Equal(t, call.Name, "client")
	req := testutil.RequireRecvCtx(ctx, t, call.Reqs)
	require.Equal(t, int32(11), req.GetUpdateSelf().GetNode().GetPreferredDerp())

	// Authorize uses `ClientUserCoordinateeAuth`
	require.NoError(t, call.Auth.Authorize(ctx, &proto.CoordinateRequest{
		AddTunnel: &proto.CoordinateRequest_Tunnel{Id: tailnet.UUIDToByteSlice(agentID)},
	}))
	require.Error(t, call.Auth.Authorize(ctx, &proto.CoordinateRequest{
		AddTunnel: &proto.CoordinateRequest_Tunnel{Id: tailnet.UUIDToByteSlice(agentID2)},
	}))
}
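
// TestWorkspaceUpdates verifies that updates pushed into the
// WorkspaceUpdatesProvider are streamed back over the WorkspaceUpdates RPC.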
func TestWorkspaceUpdates(t *testing.T) {
	t.Parallel()

	ctx := testutil.Context(t, testutil.WaitShort)
	updatesCh := make(chan *proto.WorkspaceUpdate, 1)
	updatesProvider := &fakeUpdatesProvider{ch: updatesCh}

	clientID := uuid.UUID{0x03}
	wsID := uuid.UUID{0x04}

	_, client := createUpdateService(t, ctx, clientID, updatesProvider)

	// Workspace updates
	expected := &proto.WorkspaceUpdate{
		UpsertedWorkspaces: []*proto.Workspace{
			{
				Id:     tailnet.UUIDToByteSlice(wsID),
				Name:   "ws1",
				Status: proto.Workspace_RUNNING,
			},
		},
		UpsertedAgents:    []*proto.Agent{},
		DeletedWorkspaces: []*proto.Workspace{},
		DeletedAgents:     []*proto.Agent{},
	}
	updatesCh <- expected

	updatesStream, err := client.WorkspaceUpdates(ctx, &proto.WorkspaceUpdatesRequest{
		WorkspaceOwnerId: tailnet.UUIDToByteSlice(clientID),
	})
	require.NoError(t, err)
	defer updatesStream.Close()

	updates, err := updatesStream.Recv()
	require.NoError(t, err)
	require.Len(t, updates.GetUpsertedWorkspaces(), 1)
	require.Equal(t, expected.GetUpsertedWorkspaces()[0].GetName(), updates.GetUpsertedWorkspaces()[0].GetName())
	require.Equal(t, expected.GetUpsertedWorkspaces()[0].GetStatus(), updates.GetUpsertedWorkspaces()[0].GetStatus())
	require.Equal(t, expected.GetUpsertedWorkspaces()[0].GetId(), updates.GetUpsertedWorkspaces()[0].GetId())
}
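
// createUpdateService starts a v2.0 ClientService over an in-memory pipe,
// wired to a fake coordinator, the given updates provider, and a
// ClientUserCoordinateeAuth backed by fakeTunnelAuth. Cleanup closes the
// pipe and asserts the server goroutine exits with EOF or ErrClosedPipe.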
//nolint:revive // t takes precedence
func createUpdateService(t *testing.T, ctx context.Context, clientID uuid.UUID, updates tailnet.WorkspaceUpdatesProvider) (*tailnettest.FakeCoordinator, proto.DRPCTailnetClient) {
	fCoord := tailnettest.NewFakeCoordinator()
	var coord tailnet.Coordinator = fCoord
	coordPtr := atomic.Pointer[tailnet.Coordinator]{}
	coordPtr.Store(&coord)
	logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)

	uut, err := tailnet.NewClientService(tailnet.ClientServiceOptions{
		Logger:                   logger,
		CoordPtr:                 &coordPtr,
		WorkspaceUpdatesProvider: updates,
	})
	require.NoError(t, err)

	c, s := net.Pipe()
	t.Cleanup(func() {
		_ = c.Close()
		_ = s.Close()
	})

	errCh := make(chan error, 1)
	go func() {
		err := uut.ServeClient(ctx, "2.0", s, tailnet.StreamID{
			Name: "client",
			ID:   clientID,
			Auth: tailnet.ClientUserCoordinateeAuth{
				Auth: &fakeTunnelAuth{},
			},
		})
		t.Logf("ServeClient returned; err=%v", err)
		errCh <- err
	}()

	client, err := tailnet.NewDRPCClient(c, logger)
	require.NoError(t, err)

	t.Cleanup(func() {
		err = c.Close()
		require.NoError(t, err)
		err = testutil.RequireRecvCtx(ctx, t, errCh)
		require.True(t, xerrors.Is(err, io.EOF) || xerrors.Is(err, io.ErrClosedPipe))
	})
	return fCoord, client
}
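
// fakeUpdatesProvider and fakeSubscription satisfy the provider interfaces
// by replaying updates from a single test-owned channel.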
type fakeUpdatesProvider struct {
	ch chan *proto.WorkspaceUpdate
}

func (*fakeUpdatesProvider) Close() error {
	return nil
}

func (f *fakeUpdatesProvider) Subscribe(context.Context, uuid.UUID) (tailnet.Subscription, error) {
	return &fakeSubscription{ch: f.ch}, nil
}

type fakeSubscription struct {
	ch chan *proto.WorkspaceUpdate
}

func (*fakeSubscription) Close() error {
	return nil
}

func (f *fakeSubscription) Updates() <-chan *proto.WorkspaceUpdate {
	return f.ch
}

var _ tailnet.Subscription = (*fakeSubscription)(nil)

var _ tailnet.WorkspaceUpdatesProvider = (*fakeUpdatesProvider)(nil)

type fakeTunnelAuth struct{}

// AuthorizeTunnel implements tailnet.TunnelAuthorizer.
func (*fakeTunnelAuth) AuthorizeTunnel(_ context.Context, agentID uuid.UUID) error {
	if agentID[0] != 1 {
		return xerrors.New("policy disallows request")
	}
	return nil
}

var _ tailnet.TunnelAuthorizer = (*fakeTunnelAuth)(nil)