chore: migrate to github.com/coder/clistat (#17107)

Migrate from in-tree `clistat` package to
https://github.com/coder/clistat.
Author: Danielle Maywood
Date: 2025-03-26 10:36:53 +00:00
Committed by: GitHub
Parent: 811097ef03
Commit: 1bbbae8d57
13 changed files with 17 additions and 1203 deletions
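
For downstream code the migration is an import-path change only. The sketch below is a minimal, illustrative usage of the external package, assuming github.com/coder/clistat v1.0.0 exports the same Statter API as the in-tree package deleted in this diff (New, IsContainerized, ContainerMemory, HostMemory, Result.String); it is not part of the commit.

package main

import (
	"fmt"

	"github.com/coder/clistat"
)

func main() {
	// New defaults to a read-only OS filesystem and a 100ms sample interval.
	st, err := clistat.New()
	if err != nil {
		panic(err)
	}

	// Report container cgroup memory when containerized, host memory otherwise.
	var mem *clistat.Result
	if ok, _ := st.IsContainerized(); ok {
		mem, err = st.ContainerMemory(clistat.PrefixGibi)
	} else {
		mem, err = st.HostMemory(clistat.PrefixGibi)
	}
	if err != nil {
		panic(err)
	}
	fmt.Println(mem.String()) // e.g. "4.2/16 GiB (26%)"
}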


@ -36,6 +36,7 @@ import (
"tailscale.com/util/clientmetric"
"cdr.dev/slog"
"github.com/coder/clistat"
"github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/agent/agentexec"
"github.com/coder/coder/v2/agent/agentscripts"
@ -44,7 +45,6 @@ import (
"github.com/coder/coder/v2/agent/proto/resourcesmonitor"
"github.com/coder/coder/v2/agent/reconnectingpty"
"github.com/coder/coder/v2/buildinfo"
"github.com/coder/coder/v2/cli/clistat"
"github.com/coder/coder/v2/cli/gitauth"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/codersdk"


@ -3,7 +3,7 @@ package resourcesmonitor
import (
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/clistat"
"github.com/coder/clistat"
)
type Statter interface {


@ -6,8 +6,8 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/coder/clistat"
"github.com/coder/coder/v2/agent/proto/resourcesmonitor"
"github.com/coder/coder/v2/cli/clistat"
"github.com/coder/coder/v2/coderd/util/ptr"
)


@ -1,371 +0,0 @@
package clistat
import (
"bufio"
"bytes"
"strconv"
"strings"
"github.com/hashicorp/go-multierror"
"github.com/spf13/afero"
"golang.org/x/xerrors"
"tailscale.com/types/ptr"
)
// Paths for CGroupV1.
// Ref: https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
const (
// CPU usage of all tasks in cgroup in nanoseconds.
cgroupV1CPUAcctUsage = "/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage"
// CFS quota for cgroup in MICROseconds
cgroupV1CFSQuotaUs = "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us"
// CFS period for cgroup in MICROseconds
cgroupV1CFSPeriodUs = "/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us"
// Maximum memory usable by cgroup in bytes
cgroupV1MemoryMaxUsageBytes = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
// Current memory usage of cgroup in bytes
cgroupV1MemoryUsageBytes = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
// Other memory stats - we are interested in total_inactive_file
cgroupV1MemoryStat = "/sys/fs/cgroup/memory/memory.stat"
)
// Paths for CGroupV2.
// Ref: https://docs.kernel.org/admin-guide/cgroup-v2.html
const (
// Contains quota and period in microseconds separated by a space.
cgroupV2CPUMax = "/sys/fs/cgroup/cpu.max"
// Contains current CPU usage under usage_usec
cgroupV2CPUStat = "/sys/fs/cgroup/cpu.stat"
// Contains current cgroup memory usage in bytes.
cgroupV2MemoryUsageBytes = "/sys/fs/cgroup/memory.current"
// Contains max cgroup memory usage in bytes.
cgroupV2MemoryMaxBytes = "/sys/fs/cgroup/memory.max"
// Other memory stats - we are interested in inactive_file
cgroupV2MemoryStat = "/sys/fs/cgroup/memory.stat"
)
const (
// 9223372036854771712 is the highest positive signed 64-bit integer (2^63 - 1),
// rounded down to a multiple of 4096 (2^12), the most common page size on x86 systems.
// This is used by docker to indicate no memory limit.
UnlimitedMemory int64 = 9223372036854771712
)
// ContainerCPU returns the CPU usage of the container cgroup.
// This is calculated as the difference of two samples of the
// CPU usage of the container cgroup.
// The total is read from the relevant path in /sys/fs/cgroup.
// If there is no limit set, the total is assumed to be the
// number of host cores multiplied by the CFS period.
// If the system is not containerized, this always returns nil.
func (s *Statter) ContainerCPU() (*Result, error) {
// Firstly, check if we are containerized.
if ok, err := IsContainerized(s.fs); err != nil || !ok {
return nil, nil //nolint: nilnil
}
total, err := s.cGroupCPUTotal()
if err != nil {
return nil, xerrors.Errorf("get total cpu: %w", err)
}
used1, err := s.cGroupCPUUsed()
if err != nil {
return nil, xerrors.Errorf("get cgroup CPU usage: %w", err)
}
// The measurements in /sys/fs/cgroup are counters.
// We need to wait for a bit to get a difference.
// Note that someone could reset the counter in the meantime.
// We can't do anything about that.
s.wait(s.sampleInterval)
used2, err := s.cGroupCPUUsed()
if err != nil {
return nil, xerrors.Errorf("get cgroup CPU usage: %w", err)
}
if used2 < used1 {
// Someone reset the counter. Best we can do is count from zero.
used1 = 0
}
r := &Result{
Unit: "cores",
Used: used2 - used1,
Prefix: PrefixDefault,
}
if total > 0 {
r.Total = ptr.To(total)
}
return r, nil
}
func (s *Statter) cGroupCPUTotal() (used float64, err error) {
if s.isCGroupV2() {
return s.cGroupV2CPUTotal()
}
// Fall back to CGroupv1
return s.cGroupV1CPUTotal()
}
func (s *Statter) cGroupCPUUsed() (used float64, err error) {
if s.isCGroupV2() {
return s.cGroupV2CPUUsed()
}
return s.cGroupV1CPUUsed()
}
func (s *Statter) isCGroupV2() bool {
// Check for the presence of /sys/fs/cgroup/cpu.max
_, err := s.fs.Stat(cgroupV2CPUMax)
return err == nil
}
func (s *Statter) cGroupV2CPUUsed() (used float64, err error) {
usageUs, err := readInt64Prefix(s.fs, cgroupV2CPUStat, "usage_usec")
if err != nil {
return 0, xerrors.Errorf("get cgroupv2 cpu used: %w", err)
}
periodUs, err := readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 1)
if err != nil {
return 0, xerrors.Errorf("get cpu period: %w", err)
}
return float64(usageUs) / float64(periodUs), nil
}
func (s *Statter) cGroupV2CPUTotal() (total float64, err error) {
var quotaUs, periodUs int64
periodUs, err = readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 1)
if err != nil {
return 0, xerrors.Errorf("get cpu period: %w", err)
}
quotaUs, err = readInt64SepIdx(s.fs, cgroupV2CPUMax, " ", 0)
if err != nil {
if xerrors.Is(err, strconv.ErrSyntax) {
// If the value is not a valid integer, assume it is the string
// 'max' and that there is no limit set.
return -1, nil
}
return 0, xerrors.Errorf("get cpu quota: %w", err)
}
return float64(quotaUs) / float64(periodUs), nil
}
func (s *Statter) cGroupV1CPUTotal() (float64, error) {
periodUs, err := readInt64(s.fs, cgroupV1CFSPeriodUs)
if err != nil {
// Try alternate path under /sys/fs/cgroup/cpu
var merr error
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
periodUs, err = readInt64(s.fs, strings.Replace(cgroupV1CFSPeriodUs, "cpu,cpuacct", "cpu", 1))
if err != nil {
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
return 0, merr
}
}
quotaUs, err := readInt64(s.fs, cgroupV1CFSQuotaUs)
if err != nil {
// Try alternate path under /sys/fs/cgroup/cpu
var merr error
merr = multierror.Append(merr, xerrors.Errorf("get cpu quota: %w", err))
quotaUs, err = readInt64(s.fs, strings.Replace(cgroupV1CFSQuotaUs, "cpu,cpuacct", "cpu", 1))
if err != nil {
merr = multierror.Append(merr, xerrors.Errorf("get cpu quota: %w", err))
return 0, merr
}
}
if quotaUs < 0 {
return -1, nil
}
return float64(quotaUs) / float64(periodUs), nil
}
func (s *Statter) cGroupV1CPUUsed() (float64, error) {
usageNs, err := readInt64(s.fs, cgroupV1CPUAcctUsage)
if err != nil {
// Try alternate path under /sys/fs/cgroup/cpuacct
var merr error
merr = multierror.Append(merr, xerrors.Errorf("read cpu used: %w", err))
usageNs, err = readInt64(s.fs, strings.Replace(cgroupV1CPUAcctUsage, "cpu,cpuacct", "cpuacct", 1))
if err != nil {
merr = multierror.Append(merr, xerrors.Errorf("read cpu used: %w", err))
return 0, merr
}
}
// usage is in ns, convert to us
usageNs /= 1000
periodUs, err := readInt64(s.fs, cgroupV1CFSPeriodUs)
if err != nil {
// Try alternate path under /sys/fs/cgroup/cpu
var merr error
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
periodUs, err = readInt64(s.fs, strings.Replace(cgroupV1CFSPeriodUs, "cpu,cpuacct", "cpu", 1))
if err != nil {
merr = multierror.Append(merr, xerrors.Errorf("get cpu period: %w", err))
return 0, merr
}
}
return float64(usageNs) / float64(periodUs), nil
}
// ContainerMemory returns the memory usage of the container cgroup.
// If the system is not containerized, this always returns nil.
func (s *Statter) ContainerMemory(p Prefix) (*Result, error) {
if ok, err := IsContainerized(s.fs); err != nil || !ok {
return nil, nil //nolint:nilnil
}
if s.isCGroupV2() {
return s.cGroupV2Memory(p)
}
// Fall back to CGroupv1
return s.cGroupV1Memory(p)
}
func (s *Statter) cGroupV2Memory(p Prefix) (*Result, error) {
r := &Result{
Unit: "B",
Prefix: p,
}
maxUsageBytes, err := readInt64(s.fs, cgroupV2MemoryMaxBytes)
if err != nil {
if !xerrors.Is(err, strconv.ErrSyntax) {
return nil, xerrors.Errorf("read memory total: %w", err)
}
// If the value is not a valid integer, assume it is the string
// 'max' and that there is no limit set.
} else {
r.Total = ptr.To(float64(maxUsageBytes))
}
currUsageBytes, err := readInt64(s.fs, cgroupV2MemoryUsageBytes)
if err != nil {
return nil, xerrors.Errorf("read memory usage: %w", err)
}
inactiveFileBytes, err := readInt64Prefix(s.fs, cgroupV2MemoryStat, "inactive_file")
if err != nil {
return nil, xerrors.Errorf("read memory stats: %w", err)
}
r.Used = float64(currUsageBytes - inactiveFileBytes)
return r, nil
}
func (s *Statter) cGroupV1Memory(p Prefix) (*Result, error) {
r := &Result{
Unit: "B",
Prefix: p,
}
maxUsageBytes, err := readInt64(s.fs, cgroupV1MemoryMaxUsageBytes)
if err != nil {
if !xerrors.Is(err, strconv.ErrSyntax) {
return nil, xerrors.Errorf("read memory total: %w", err)
}
// I haven't found an instance where this isn't a valid integer.
// Nonetheless, if it is not, assume there is no limit set.
maxUsageBytes = -1
}
// Set to unlimited if we detect the unlimited docker value.
if maxUsageBytes == UnlimitedMemory {
maxUsageBytes = -1
}
usageBytes, err := readInt64(s.fs, cgroupV1MemoryUsageBytes)
if err != nil {
return nil, xerrors.Errorf("read memory usage: %w", err)
}
totalInactiveFileBytes, err := readInt64Prefix(s.fs, cgroupV1MemoryStat, "total_inactive_file")
if err != nil {
return nil, xerrors.Errorf("read memory stats: %w", err)
}
// If max usage bytes is -1, there is no memory limit set.
if maxUsageBytes > 0 {
r.Total = ptr.To(float64(maxUsageBytes))
}
// Total memory used is usage - total_inactive_file
r.Used = float64(usageBytes - totalInactiveFileBytes)
return r, nil
}
// read an int64 value from path
func readInt64(fs afero.Fs, path string) (int64, error) {
data, err := afero.ReadFile(fs, path)
if err != nil {
return 0, xerrors.Errorf("read %s: %w", path, err)
}
val, err := strconv.ParseInt(string(bytes.TrimSpace(data)), 10, 64)
if err != nil {
return 0, xerrors.Errorf("parse %s: %w", path, err)
}
return val, nil
}
// read an int64 value from path at field idx separated by sep
func readInt64SepIdx(fs afero.Fs, path, sep string, idx int) (int64, error) {
data, err := afero.ReadFile(fs, path)
if err != nil {
return 0, xerrors.Errorf("read %s: %w", path, err)
}
parts := strings.Split(string(data), sep)
if len(parts) <= idx {
return 0, xerrors.Errorf("expected line %q to have at least %d parts", string(data), idx+1)
}
val, err := strconv.ParseInt(strings.TrimSpace(parts[idx]), 10, 64)
if err != nil {
return 0, xerrors.Errorf("parse %s: %w", path, err)
}
return val, nil
}
// read the first int64 value from path prefixed with prefix
func readInt64Prefix(fs afero.Fs, path, prefix string) (int64, error) {
data, err := afero.ReadFile(fs, path)
if err != nil {
return 0, xerrors.Errorf("read %s: %w", path, err)
}
scn := bufio.NewScanner(bytes.NewReader(data))
for scn.Scan() {
line := strings.TrimSpace(scn.Text())
if !strings.HasPrefix(line, prefix) {
continue
}
parts := strings.Fields(line)
if len(parts) != 2 {
return 0, xerrors.Errorf("parse %s: expected two fields but got %s", path, line)
}
val, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64)
if err != nil {
return 0, xerrors.Errorf("parse %s: %w", path, err)
}
return val, nil
}
return 0, xerrors.Errorf("parse %s: did not find line with prefix %s", path, prefix)
}


@ -1,86 +0,0 @@
package clistat
import (
"bufio"
"bytes"
"os"
"github.com/spf13/afero"
"golang.org/x/xerrors"
)
const (
procMounts = "/proc/mounts"
procOneCgroup = "/proc/1/cgroup"
sysCgroupType = "/sys/fs/cgroup/cgroup.type"
kubernetesDefaultServiceAccountToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" //nolint:gosec
)
func (s *Statter) IsContainerized() (ok bool, err error) {
return IsContainerized(s.fs)
}
// IsContainerized returns whether the host is containerized.
// This is adapted from https://github.com/elastic/go-sysinfo/tree/main/providers/linux/container.go#L31
// with modifications to support Sysbox containers.
// On non-Linux platforms, it always returns false.
func IsContainerized(fs afero.Fs) (ok bool, err error) {
cgData, err := afero.ReadFile(fs, procOneCgroup)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, xerrors.Errorf("read file %s: %w", procOneCgroup, err)
}
scn := bufio.NewScanner(bytes.NewReader(cgData))
for scn.Scan() {
line := scn.Bytes()
if bytes.Contains(line, []byte("docker")) ||
bytes.Contains(line, []byte(".slice")) ||
bytes.Contains(line, []byte("lxc")) ||
bytes.Contains(line, []byte("kubepods")) {
return true, nil
}
}
// Sometimes the above method of sniffing /proc/1/cgroup isn't reliable.
// If a Kubernetes service account token is present, that's
// also a good indication that we are in a container.
_, err = afero.ReadFile(fs, kubernetesDefaultServiceAccountToken)
if err == nil {
return true, nil
}
// Last-ditch effort to detect Sysbox containers.
// Check if we have anything mounted as type sysboxfs in /proc/mounts
mountsData, err := afero.ReadFile(fs, procMounts)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, xerrors.Errorf("read file %s: %w", procMounts, err)
}
scn = bufio.NewScanner(bytes.NewReader(mountsData))
for scn.Scan() {
line := scn.Bytes()
if bytes.Contains(line, []byte("sysboxfs")) {
return true, nil
}
}
// Adapted from https://github.com/systemd/systemd/blob/88bbf187a9b2ebe0732caa1e886616ae5f8186da/src/basic/virt.c#L603-L605
// The file `/sys/fs/cgroup/cgroup.type` does not exist on the root cgroup.
// If this file exists we can be sure we're in a container.
cgTypeExists, err := afero.Exists(fs, sysCgroupType)
if err != nil {
return false, xerrors.Errorf("check file exists %s: %w", sysCgroupType, err)
}
if cgTypeExists {
return true, nil
}
// If we get here, we are _probably_ not running in a container.
return false, nil
}


@ -1,28 +0,0 @@
//go:build !windows
package clistat
import (
"syscall"
"tailscale.com/types/ptr"
)
// Disk returns the disk usage of the given path.
// If path is empty, it returns the usage of the root directory.
func (*Statter) Disk(p Prefix, path string) (*Result, error) {
if path == "" {
path = "/"
}
var stat syscall.Statfs_t
if err := syscall.Statfs(path, &stat); err != nil {
return nil, err
}
var r Result
// #nosec G115 - Safe conversion because stat.Bsize is always positive and within uint64 range
r.Total = ptr.To(float64(stat.Blocks * uint64(stat.Bsize)))
r.Used = float64(stat.Blocks-stat.Bfree) * float64(stat.Bsize)
r.Unit = "B"
r.Prefix = p
return &r, nil
}


@ -1,36 +0,0 @@
package clistat
import (
"golang.org/x/sys/windows"
"tailscale.com/types/ptr"
)
// Disk returns the disk usage of the given path.
// If path is empty, it defaults to C:\
func (*Statter) Disk(p Prefix, path string) (*Result, error) {
if path == "" {
path = `C:\`
}
pathPtr, err := windows.UTF16PtrFromString(path)
if err != nil {
return nil, err
}
var freeBytes, totalBytes, availBytes uint64
if err := windows.GetDiskFreeSpaceEx(
pathPtr,
&freeBytes,
&totalBytes,
&availBytes,
); err != nil {
return nil, err
}
var r Result
r.Total = ptr.To(float64(totalBytes))
r.Used = float64(totalBytes - freeBytes)
r.Unit = "B"
r.Prefix = p
return &r, nil
}


@ -1,236 +0,0 @@
package clistat
import (
"math"
"runtime"
"strconv"
"strings"
"time"
"github.com/elastic/go-sysinfo"
"github.com/spf13/afero"
"golang.org/x/xerrors"
"tailscale.com/types/ptr"
sysinfotypes "github.com/elastic/go-sysinfo/types"
)
// Prefix is a scale multiplier for a result.
// Used when creating a human-readable representation.
type Prefix float64
const (
PrefixDefault = 1.0
PrefixKibi = 1024.0
PrefixMebi = PrefixKibi * 1024.0
PrefixGibi = PrefixMebi * 1024.0
PrefixTebi = PrefixGibi * 1024.0
)
var (
PrefixHumanKibi = "Ki"
PrefixHumanMebi = "Mi"
PrefixHumanGibi = "Gi"
PrefixHumanTebi = "Ti"
)
func (s *Prefix) String() string {
switch *s {
case PrefixKibi:
return "Ki"
case PrefixMebi:
return "Mi"
case PrefixGibi:
return "Gi"
case PrefixTebi:
return "Ti"
default:
return ""
}
}
func ParsePrefix(s string) Prefix {
switch s {
case PrefixHumanKibi:
return PrefixKibi
case PrefixHumanMebi:
return PrefixMebi
case PrefixHumanGibi:
return PrefixGibi
case PrefixHumanTebi:
return PrefixTebi
default:
return PrefixDefault
}
}
// Result is a generic result type for a statistic.
// Total is the total amount of the resource available.
// It is nil if the resource is not a finite quantity.
// Unit is the unit of the resource.
// Used is the amount of the resource used.
type Result struct {
Total *float64 `json:"total"`
Unit string `json:"unit"`
Used float64 `json:"used"`
Prefix Prefix `json:"-"`
}
// String returns a human-readable representation of the result.
func (r *Result) String() string {
if r == nil {
return "-"
}
scale := 1.0
if r.Prefix != 0.0 {
scale = float64(r.Prefix)
}
var sb strings.Builder
var usedScaled, totalScaled float64
usedScaled = r.Used / scale
_, _ = sb.WriteString(humanizeFloat(usedScaled))
if r.Total != (*float64)(nil) {
_, _ = sb.WriteString("/")
totalScaled = *r.Total / scale
_, _ = sb.WriteString(humanizeFloat(totalScaled))
}
_, _ = sb.WriteString(" ")
_, _ = sb.WriteString(r.Prefix.String())
_, _ = sb.WriteString(r.Unit)
if r.Total != (*float64)(nil) && *r.Total > 0 {
_, _ = sb.WriteString(" (")
pct := r.Used / *r.Total * 100.0
_, _ = sb.WriteString(strconv.FormatFloat(pct, 'f', 0, 64))
_, _ = sb.WriteString("%)")
}
return strings.TrimSpace(sb.String())
}
func humanizeFloat(f float64) string {
// humanize.FtoaWithDigits does not round correctly.
prec := precision(f)
rat := math.Pow(10, float64(prec))
rounded := math.Round(f*rat) / rat
return strconv.FormatFloat(rounded, 'f', -1, 64)
}
// limit precision to 3 digits at most to preserve space
func precision(f float64) int {
fabs := math.Abs(f)
if fabs == 0.0 {
return 0
}
if fabs < 1.0 {
return 3
}
if fabs < 10.0 {
return 2
}
if fabs < 100.0 {
return 1
}
return 0
}
// Statter is a system statistics collector.
// It is a thin wrapper around the elastic/go-sysinfo library.
type Statter struct {
hi sysinfotypes.Host
fs afero.Fs
sampleInterval time.Duration
nproc int
wait func(time.Duration)
}
type Option func(*Statter)
// WithSampleInterval sets the sample interval for the statter.
func WithSampleInterval(d time.Duration) Option {
return func(s *Statter) {
s.sampleInterval = d
}
}
// WithFS sets the fs for the statter.
func WithFS(fs afero.Fs) Option {
return func(s *Statter) {
s.fs = fs
}
}
func New(opts ...Option) (*Statter, error) {
hi, err := sysinfo.Host()
if err != nil {
return nil, xerrors.Errorf("get host info: %w", err)
}
s := &Statter{
hi: hi,
fs: afero.NewReadOnlyFs(afero.NewOsFs()),
sampleInterval: 100 * time.Millisecond,
nproc: runtime.NumCPU(),
wait: func(d time.Duration) {
<-time.After(d)
},
}
for _, opt := range opts {
opt(s)
}
return s, nil
}
// HostCPU returns the CPU usage of the host. This is calculated by
// taking two samples of CPU usage and calculating the difference.
// Total will always be equal to the number of cores.
// Used will be an estimate of the number of cores used during the sample interval.
// This is calculated by taking the difference between the total and idle CPU time
// and scaling it by the number of cores.
// Units are in "cores".
func (s *Statter) HostCPU() (*Result, error) {
r := &Result{
Unit: "cores",
Total: ptr.To(float64(s.nproc)),
Prefix: PrefixDefault,
}
c1, err := s.hi.CPUTime()
if err != nil {
return nil, xerrors.Errorf("get first cpu sample: %w", err)
}
s.wait(s.sampleInterval)
c2, err := s.hi.CPUTime()
if err != nil {
return nil, xerrors.Errorf("get second cpu sample: %w", err)
}
total := c2.Total() - c1.Total()
if total == 0 {
return r, nil // no change
}
idle := c2.Idle - c1.Idle
used := total - idle
scaleFactor := float64(s.nproc) / total.Seconds()
r.Used = used.Seconds() * scaleFactor
return r, nil
}
// HostMemory returns the memory usage of the host, in bytes.
func (s *Statter) HostMemory(p Prefix) (*Result, error) {
r := &Result{
Unit: "B",
Prefix: p,
}
hm, err := s.hi.Memory()
if err != nil {
return nil, xerrors.Errorf("get memory info: %w", err)
}
r.Total = ptr.To(float64(hm.Total))
// On Linux, hm.Used equates to MemTotal - MemFree in /proc/meminfo.
// This includes buffers and cache.
// So use MemAvailable instead, which excludes reclaimable buffers and cache.
// On Windows, this is also calculated as Total - Available.
r.Used = float64(hm.Total - hm.Available)
return r, nil
}


@ -1,433 +0,0 @@
package clistat
import (
"testing"
"time"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"tailscale.com/types/ptr"
)
func TestResultString(t *testing.T) {
t.Parallel()
for _, tt := range []struct {
Expected string
Result Result
}{
{
Expected: "1.23/5.68 quatloos (22%)",
Result: Result{Used: 1.234, Total: ptr.To(5.678), Unit: "quatloos"},
},
{
Expected: "0/0 HP",
Result: Result{Used: 0.0, Total: ptr.To(0.0), Unit: "HP"},
},
{
Expected: "123 seconds",
Result: Result{Used: 123.01, Total: nil, Unit: "seconds"},
},
{
Expected: "12.3",
Result: Result{Used: 12.34, Total: nil, Unit: ""},
},
{
Expected: "1.5 KiB",
Result: Result{Used: 1536, Total: nil, Unit: "B", Prefix: PrefixKibi},
},
{
Expected: "1.23 things",
Result: Result{Used: 1.234, Total: nil, Unit: "things"},
},
{
Expected: "0/100 TiB (0%)",
Result: Result{Used: 1, Total: ptr.To(100.0 * float64(PrefixTebi)), Unit: "B", Prefix: PrefixTebi},
},
{
Expected: "0.5/8 cores (6%)",
Result: Result{Used: 0.5, Total: ptr.To(8.0), Unit: "cores"},
},
} {
assert.Equal(t, tt.Expected, tt.Result.String())
}
}
func TestStatter(t *testing.T) {
t.Parallel()
// We cannot make many assertions about the data we get back
// for host-specific measurements because these tests could
// and should run successfully on any OS.
// The best we can do is assert that it is non-zero.
t.Run("HostOnly", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsHostOnly)
s, err := New(WithFS(fs))
require.NoError(t, err)
t.Run("HostCPU", func(t *testing.T) {
t.Parallel()
cpu, err := s.HostCPU()
require.NoError(t, err)
// assert.NotZero(t, cpu.Used) // HostCPU can sometimes be zero.
assert.NotZero(t, cpu.Total)
assert.Equal(t, "cores", cpu.Unit)
})
t.Run("HostMemory", func(t *testing.T) {
t.Parallel()
mem, err := s.HostMemory(PrefixDefault)
require.NoError(t, err)
assert.NotZero(t, mem.Used)
assert.NotZero(t, mem.Total)
assert.Equal(t, "B", mem.Unit)
})
t.Run("HostDisk", func(t *testing.T) {
t.Parallel()
disk, err := s.Disk(PrefixDefault, "") // default to home dir
require.NoError(t, err)
assert.NotZero(t, disk.Used)
assert.NotZero(t, disk.Total)
assert.Equal(t, "B", disk.Unit)
})
})
// Sometimes we do need to "fake" some stuff
// that happens while we wait.
withWait := func(waitF func(time.Duration)) Option {
return func(s *Statter) {
s.wait = waitF
}
}
// Other times we just want things to run fast.
withNoWait := func(s *Statter) {
s.wait = func(time.Duration) {}
}
// We don't want to use the actual host CPU here.
withNproc := func(n int) Option {
return func(s *Statter) {
s.nproc = n
}
}
// For container-specific measurements, everything we need
// can be read from the filesystem. We control the FS, so
// we control the data.
t.Run("CGroupV1", func(t *testing.T) {
t.Parallel()
t.Run("ContainerCPU/Limit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV1)
fakeWait := func(time.Duration) {
// Fake 100ms of usage (in ns), i.e. one full CFS period
mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
}
s, err := New(WithFS(fs), withWait(fakeWait))
require.NoError(t, err)
cpu, err := s.ContainerCPU()
require.NoError(t, err)
require.NotNil(t, cpu)
assert.Equal(t, 1.0, cpu.Used)
require.NotNil(t, cpu.Total)
assert.Equal(t, 2.5, *cpu.Total)
assert.Equal(t, "cores", cpu.Unit)
})
t.Run("ContainerCPU/NoLimit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV1NoLimit)
fakeWait := func(time.Duration) {
// Fake 100ms of usage (in ns), i.e. one full CFS period
mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
}
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
require.NoError(t, err)
cpu, err := s.ContainerCPU()
require.NoError(t, err)
require.NotNil(t, cpu)
assert.Equal(t, 1.0, cpu.Used)
require.Nil(t, cpu.Total)
assert.Equal(t, "cores", cpu.Unit)
})
t.Run("ContainerCPU/AltPath", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV1AltPath)
fakeWait := func(time.Duration) {
// Fake 100ms of usage (in ns), i.e. one full CFS period
mungeFS(t, fs, "/sys/fs/cgroup/cpuacct/cpuacct.usage", "100000000")
}
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
require.NoError(t, err)
cpu, err := s.ContainerCPU()
require.NoError(t, err)
require.NotNil(t, cpu)
assert.Equal(t, 1.0, cpu.Used)
require.NotNil(t, cpu.Total)
assert.Equal(t, 2.5, *cpu.Total)
assert.Equal(t, "cores", cpu.Unit)
})
t.Run("ContainerMemory", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV1)
s, err := New(WithFS(fs), withNoWait)
require.NoError(t, err)
mem, err := s.ContainerMemory(PrefixDefault)
require.NoError(t, err)
require.NotNil(t, mem)
assert.Equal(t, 268435456.0, mem.Used)
assert.NotNil(t, mem.Total)
assert.Equal(t, 1073741824.0, *mem.Total)
assert.Equal(t, "B", mem.Unit)
})
t.Run("ContainerMemory/NoLimit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV1NoLimit)
s, err := New(WithFS(fs), withNoWait)
require.NoError(t, err)
mem, err := s.ContainerMemory(PrefixDefault)
require.NoError(t, err)
require.NotNil(t, mem)
assert.Equal(t, 268435456.0, mem.Used)
assert.Nil(t, mem.Total)
assert.Equal(t, "B", mem.Unit)
})
t.Run("ContainerMemory/NoLimit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV1DockerNoMemoryLimit)
s, err := New(WithFS(fs), withNoWait)
require.NoError(t, err)
mem, err := s.ContainerMemory(PrefixDefault)
require.NoError(t, err)
require.NotNil(t, mem)
assert.Equal(t, 268435456.0, mem.Used)
assert.Nil(t, mem.Total)
assert.Equal(t, "B", mem.Unit)
})
})
t.Run("CGroupV2", func(t *testing.T) {
t.Parallel()
t.Run("ContainerCPU/Limit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV2)
fakeWait := func(time.Duration) {
mungeFS(t, fs, cgroupV2CPUStat, "usage_usec 100000")
}
s, err := New(WithFS(fs), withWait(fakeWait))
require.NoError(t, err)
cpu, err := s.ContainerCPU()
require.NoError(t, err)
require.NotNil(t, cpu)
assert.Equal(t, 1.0, cpu.Used)
require.NotNil(t, cpu.Total)
assert.Equal(t, 2.5, *cpu.Total)
assert.Equal(t, "cores", cpu.Unit)
})
t.Run("ContainerCPU/NoLimit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV2NoLimit)
fakeWait := func(time.Duration) {
mungeFS(t, fs, cgroupV2CPUStat, "usage_usec 100000")
}
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait))
require.NoError(t, err)
cpu, err := s.ContainerCPU()
require.NoError(t, err)
require.NotNil(t, cpu)
assert.Equal(t, 1.0, cpu.Used)
require.Nil(t, cpu.Total)
assert.Equal(t, "cores", cpu.Unit)
})
t.Run("ContainerMemory/Limit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV2)
s, err := New(WithFS(fs), withNoWait)
require.NoError(t, err)
mem, err := s.ContainerMemory(PrefixDefault)
require.NoError(t, err)
require.NotNil(t, mem)
assert.Equal(t, 268435456.0, mem.Used)
assert.NotNil(t, mem.Total)
assert.Equal(t, 1073741824.0, *mem.Total)
assert.Equal(t, "B", mem.Unit)
})
t.Run("ContainerMemory/NoLimit", func(t *testing.T) {
t.Parallel()
fs := initFS(t, fsContainerCgroupV2NoLimit)
s, err := New(WithFS(fs), withNoWait)
require.NoError(t, err)
mem, err := s.ContainerMemory(PrefixDefault)
require.NoError(t, err)
require.NotNil(t, mem)
assert.Equal(t, 268435456.0, mem.Used)
assert.Nil(t, mem.Total)
assert.Equal(t, "B", mem.Unit)
})
})
}
func TestIsContainerized(t *testing.T) {
t.Parallel()
for _, tt := range []struct {
Name string
FS map[string]string
Expected bool
Error string
}{
{
Name: "Empty",
FS: map[string]string{},
Expected: false,
Error: "",
},
{
Name: "BareMetal",
FS: fsHostOnly,
Expected: false,
Error: "",
},
{
Name: "Docker",
FS: fsContainerCgroupV1,
Expected: true,
Error: "",
},
{
Name: "Sysbox",
FS: fsContainerSysbox,
Expected: true,
Error: "",
},
{
Name: "Docker (Cgroupns=private)",
FS: fsContainerCgroupV2PrivateCgroupns,
Expected: true,
Error: "",
},
} {
tt := tt
t.Run(tt.Name, func(t *testing.T) {
t.Parallel()
fs := initFS(t, tt.FS)
actual, err := IsContainerized(fs)
if tt.Error == "" {
assert.NoError(t, err)
assert.Equal(t, tt.Expected, actual)
} else {
assert.ErrorContains(t, err, tt.Error)
assert.False(t, actual)
}
})
}
}
// helper function for initializing a fs
func initFS(t testing.TB, m map[string]string) afero.Fs {
t.Helper()
fs := afero.NewMemMapFs()
for k, v := range m {
mungeFS(t, fs, k, v)
}
return fs
}
// helper function for writing v to fs under path k
func mungeFS(t testing.TB, fs afero.Fs, k, v string) {
t.Helper()
require.NoError(t, afero.WriteFile(fs, k, []byte(v+"\n"), 0o600))
}
var (
fsHostOnly = map[string]string{
procOneCgroup: "0::/",
procMounts: "/dev/sda1 / ext4 rw,relatime 0 0",
}
fsContainerSysbox = map[string]string{
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
sysboxfs /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
cgroupV2CPUMax: "250000 100000",
cgroupV2CPUStat: "usage_usec 0",
}
fsContainerCgroupV2 = map[string]string{
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
cgroupV2CPUMax: "250000 100000",
cgroupV2CPUStat: "usage_usec 0",
cgroupV2MemoryMaxBytes: "1073741824",
cgroupV2MemoryUsageBytes: "536870912",
cgroupV2MemoryStat: "inactive_file 268435456",
}
fsContainerCgroupV2NoLimit = map[string]string{
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
cgroupV2CPUMax: "max 100000",
cgroupV2CPUStat: "usage_usec 0",
cgroupV2MemoryMaxBytes: "max",
cgroupV2MemoryUsageBytes: "536870912",
cgroupV2MemoryStat: "inactive_file 268435456",
}
fsContainerCgroupV2PrivateCgroupns = map[string]string{
procOneCgroup: "0::/",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
sysCgroupType: "domain",
}
fsContainerCgroupV1 = map[string]string{
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
cgroupV1CPUAcctUsage: "0",
cgroupV1CFSQuotaUs: "250000",
cgroupV1CFSPeriodUs: "100000",
cgroupV1MemoryMaxUsageBytes: "1073741824",
cgroupV1MemoryUsageBytes: "536870912",
cgroupV1MemoryStat: "total_inactive_file 268435456",
}
fsContainerCgroupV1NoLimit = map[string]string{
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
cgroupV1CPUAcctUsage: "0",
cgroupV1CFSQuotaUs: "-1",
cgroupV1CFSPeriodUs: "100000",
cgroupV1MemoryMaxUsageBytes: "max", // I have never seen this in the wild
cgroupV1MemoryUsageBytes: "536870912",
cgroupV1MemoryStat: "total_inactive_file 268435456",
}
fsContainerCgroupV1DockerNoMemoryLimit = map[string]string{
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
cgroupV1CPUAcctUsage: "0",
cgroupV1CFSQuotaUs: "-1",
cgroupV1CFSPeriodUs: "100000",
cgroupV1MemoryMaxUsageBytes: "9223372036854771712",
cgroupV1MemoryUsageBytes: "536870912",
cgroupV1MemoryStat: "total_inactive_file 268435456",
}
fsContainerCgroupV1AltPath = map[string]string{
procOneCgroup: "0::/docker/aa86ac98959eeedeae0ecb6e0c9ddd8ae8b97a9d0fdccccf7ea7a474f4e0bb1f",
procMounts: `overlay / overlay rw,relatime,lowerdir=/some/path:/some/path,upperdir=/some/path:/some/path,workdir=/some/path:/some/path 0 0
proc /proc/sys proc ro,nosuid,nodev,noexec,relatime 0 0`,
"/sys/fs/cgroup/cpuacct/cpuacct.usage": "0",
"/sys/fs/cgroup/cpu/cpu.cfs_quota_us": "250000",
"/sys/fs/cgroup/cpu/cpu.cfs_period_us": "100000",
cgroupV1MemoryMaxUsageBytes: "1073741824",
cgroupV1MemoryUsageBytes: "536870912",
cgroupV1MemoryStat: "total_inactive_file 268435456",
}
)


@ -7,7 +7,7 @@ import (
"github.com/spf13/afero"
"golang.org/x/xerrors"
"github.com/coder/coder/v2/cli/clistat"
"github.com/coder/clistat"
"github.com/coder/coder/v2/cli/cliui"
"github.com/coder/serpent"
)
@ -67,7 +67,7 @@ func (r *RootCmd) stat() *serpent.Command {
}()
go func() {
defer close(containerErr)
if ok, _ := clistat.IsContainerized(fs); !ok {
if ok, _ := st.IsContainerized(); !ok {
// don't error if we're not in a container
return
}
@ -104,7 +104,7 @@ func (r *RootCmd) stat() *serpent.Command {
sr.Disk = ds
// Container-only stats.
if ok, err := clistat.IsContainerized(fs); err == nil && ok {
if ok, err := st.IsContainerized(); err == nil && ok {
cs, err := st.ContainerCPU()
if err != nil {
return err
@ -150,7 +150,7 @@ func (*RootCmd) statCPU(fs afero.Fs) *serpent.Command {
Handler: func(inv *serpent.Invocation) error {
var cs *clistat.Result
var err error
if ok, _ := clistat.IsContainerized(fs); ok && !hostArg {
if ok, _ := st.IsContainerized(); ok && !hostArg {
cs, err = st.ContainerCPU()
} else {
cs, err = st.HostCPU()
@ -204,7 +204,7 @@ func (*RootCmd) statMem(fs afero.Fs) *serpent.Command {
pfx := clistat.ParsePrefix(prefixArg)
var ms *clistat.Result
var err error
if ok, _ := clistat.IsContainerized(fs); ok && !hostArg {
if ok, _ := st.IsContainerized(); ok && !hostArg {
ms, err = st.ContainerMemory(pfx)
} else {
ms, err = st.HostMemory(pfx)


@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/coder/coder/v2/cli/clistat"
"github.com/coder/clistat"
"github.com/coder/coder/v2/cli/clitest"
"github.com/coder/coder/v2/testutil"
)

go.mod

@ -103,7 +103,7 @@ require (
github.com/dave/dst v0.27.2
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/dblohm7/wingoes v0.0.0-20240820181039-f2b84150679e
github.com/elastic/go-sysinfo v1.15.0
github.com/elastic/go-sysinfo v1.15.1
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21
github.com/emersion/go-smtp v0.21.2
github.com/fatih/color v1.18.0
@ -209,7 +209,7 @@ require (
gvisor.dev/gvisor v0.0.0-20240509041132-65b30f7869dc
kernel.org/pub/linux/libs/security/libcap/cap v1.2.73
storj.io/drpc v0.0.33
tailscale.com v1.46.1
tailscale.com v1.80.3
)
require (
@ -469,6 +469,8 @@ require (
sigs.k8s.io/yaml v1.4.0 // indirect
)
require github.com/coder/clistat v1.0.0
require (
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
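
(As an aside, the github.com/coder/clistat v1.0.0 requirement above would normally be recorded with standard Go tooling, e.g. `go get github.com/coder/clistat@v1.0.0`, which also adds the go.sum entries below.)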

go.sum

@ -220,6 +220,8 @@ github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vc
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/coder/bubbletea v1.2.2-0.20241212190825-007a1cdb2c41 h1:SBN/DA63+ZHwuWwPHPYoCZ/KLAjHv5g4h2MS4f2/MTI=
github.com/coder/bubbletea v1.2.2-0.20241212190825-007a1cdb2c41/go.mod h1:I9ULxr64UaOSUv7hcb3nX4kowodJCVS7vt7VVJk/kW4=
github.com/coder/clistat v1.0.0 h1:MjiS7qQ1IobuSSgDnxcCSyBPESs44hExnh2TEqMcGnA=
github.com/coder/clistat v1.0.0/go.mod h1:F+gLef+F9chVrleq808RBxdaoq52R4VLopuLdAsh8Y4=
github.com/coder/flog v1.1.0 h1:kbAes1ai8fIS5OeV+QAnKBQE22ty1jRF/mcAwHpLBa4=
github.com/coder/flog v1.1.0/go.mod h1:UQlQvrkJBvnRGo69Le8E24Tcl5SJleAAR7gYEHzAmdQ=
github.com/coder/go-httpstat v0.0.0-20230801153223-321c88088322 h1:m0lPZjlQ7vdVpRBPKfYIFlmgevoTkBxB10wv6l2gOaU=
@ -307,8 +309,8 @@ github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elastic/go-sysinfo v1.15.0 h1:54pRFlAYUlVNQ2HbXzLVZlV+fxS7Eax49stzg95M4Xw=
github.com/elastic/go-sysinfo v1.15.0/go.mod h1:jPSuTgXG+dhhh0GKIyI2Cso+w5lPJ5PvVqKlL8LV/Hk=
github.com/elastic/go-sysinfo v1.15.1 h1:zBmTnFEXxIQ3iwcQuk7MzaUotmKRp3OabbbWM8TdzIQ=
github.com/elastic/go-sysinfo v1.15.1/go.mod h1:jPSuTgXG+dhhh0GKIyI2Cso+w5lPJ5PvVqKlL8LV/Hk=
github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY=
github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21 h1:OJyUGMJTzHTd1XQp98QTaHernxMYzRaOasRir9hUlFQ=