chore: migrate from wrapped sync.Map to HashTrieMap

This should lower memory consumption: HashTrieMap keeps keys and values as concrete types instead of boxing them as `any`, and it doesn't maintain the double (read + dirty) maps that sync.Map keeps internally.

Signed-off-by: Dmitriy Matrenichev <dmitry.matrenichev@siderolabs.com>
Author: Dmitriy Matrenichev
Date: 2024-05-28 03:02:13 +03:00
parent 8a7a0d4a43
commit a2217bd298
3 changed files with 27 additions and 31 deletions
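
For orientation, here is a minimal sketch of the new map usage this commit introduces. It is illustrative only: it assumes exactly the concurrent.HashTrieMap methods that appear in the diff below (Enumerate, LoadOrStore and CompareAndDelete, replacing the SyncMap Range, Store and Delete calls), and the stripped-down Cluster type and the "demo" ID are placeholders rather than code from this repository.

package main

import (
	"fmt"

	"github.com/siderolabs/gen/concurrent"
)

// Cluster is a stripped-down placeholder for the real state.Cluster type.
type Cluster struct{ id string }

func main() {
	// The typed constructor replaces the zero-value containers.SyncMap field;
	// keys and values stay concrete types, so nothing is boxed into `any`.
	clusters := concurrent.NewHashTrieMap[string, *Cluster]()

	// LoadOrStore replaces the plain Store used before: the second return
	// value reports whether the key was already present, which the import
	// path below uses to reject duplicate cluster IDs.
	if _, loaded := clusters.LoadOrStore("demo", &Cluster{id: "demo"}); loaded {
		fmt.Println("cluster already exists")
	}

	// Enumerate replaces Range; returning true continues the iteration.
	clusters.Enumerate(func(id string, cluster *Cluster) bool {
		// CompareAndDelete replaces Delete: the entry is removed only if it
		// still holds the same *Cluster value observed during enumeration.
		clusters.CompareAndDelete(id, cluster)

		return true
	})
}

Presumably the CompareAndDelete swap in GarbageCollect guards against racing updates: it removes an entry only if it still holds the exact *Cluster value the GC pass observed as empty, whereas the old Delete removed whatever happened to be stored under the key at that moment.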

View File

@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-05-19T19:39:10Z by kres dccd292.
# Generated on 2024-05-28T08:54:28Z by kres a914cae.
# options for analysis running
run:
@@ -13,8 +13,8 @@ run:
# output configuration options
output:
formats:
- format: colored-line-number
path: stdout
- format: colored-line-number
path: stdout
print-issued-lines: true
print-linter-name: true
uniq-by-line: true
@@ -94,17 +94,21 @@ linters-settings:
cyclop:
# the maximal code complexity to report
max-complexity: 20
# depguard:
# Main:
# deny:
# - github.com/OpenPeeDeeP/depguard # this is just an example
depguard:
rules:
prevent_unmaintained_packages:
list-mode: lax # allow unless explicitly denied
files:
- $all
deny:
- pkg: io/ioutil
desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
linters:
enable-all: true
disable-all: false
fast: false
disable:
- exhaustivestruct
- exhaustruct
- err113
- forbidigo
@@ -120,30 +124,17 @@ linters:
- mnd
- nestif
- nonamedreturns
- nosnakecase
- paralleltest
- tagalign
- tagliatelle
- thelper
- typecheck
- varnamelen
- wrapcheck
- depguard # Disabled because starting with golangci-lint 1.53.0 it doesn't allow denylist alone anymore
- testifylint # complains about our assert recorder and has a number of false positives for assert.Greater(t, thing, 1)
- protogetter # complains about us using Value field on typed spec, instead of GetValue which has a different signature
- perfsprint # complains about us using fmt.Sprintf in non-performance critical code, updating just kres took too long
# abandoned linters for which golangci shows the warning that the repo is archived by the owner
- deadcode
- golint
- ifshort
- interfacer
- maligned
- scopelint
- structcheck
- varcheck
# disabled as it seems to be broken - goes into imported libraries and reports issues there
- musttag
- goimports # same as gci
- musttag # seems to be broken - goes into imported libraries and reports issues there
issues:
exclude: [ ]

View File

@@ -6,6 +6,7 @@
package state
import (
"fmt"
"slices"
"github.com/siderolabs/gen/xslices"
@@ -23,7 +24,7 @@ func (state *State) ExportClusterSnapshots(f func(snapshot *storagepb.ClusterSna
// reuse the same snapshot in each iteration
clusterSnapshot := &storagepb.ClusterSnapshot{}
state.clusters.Range(func(_ string, cluster *Cluster) bool {
state.clusters.Enumerate(func(_ string, cluster *Cluster) bool {
snapshotCluster(cluster, clusterSnapshot)
err = f(clusterSnapshot)
@@ -50,7 +51,10 @@ func (state *State) ImportClusterSnapshots(f func() (*storagepb.ClusterSnapshot,
cluster := clusterFromSnapshot(clusterSnapshot)
state.clusters.Store(cluster.id, cluster)
_, loaded := state.clusters.LoadOrStore(cluster.id, cluster)
if loaded {
return fmt.Errorf("cluster %q already exists", cluster.id)
}
}
return nil

View File

@@ -11,13 +11,13 @@ import (
"time"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/siderolabs/gen/containers"
"github.com/siderolabs/gen/concurrent"
"go.uber.org/zap"
)
// State keeps the discovery service state.
type State struct { //nolint:govet
clusters containers.SyncMap[string, *Cluster]
type State struct {
clusters *concurrent.HashTrieMap[string, *Cluster]
logger *zap.Logger
mClustersDesc *prom.Desc
@@ -32,7 +32,8 @@ type State struct { //nolint:govet
// NewState create new instance of State.
func NewState(logger *zap.Logger) *State {
return &State{
logger: logger,
clusters: concurrent.NewHashTrieMap[string, *Cluster](),
logger: logger,
mClustersDesc: prom.NewDesc(
"discovery_state_clusters",
"The current number of clusters in the state.",
@@ -84,12 +85,12 @@ func (state *State) GetCluster(id string) *Cluster {
// GarbageCollect recursively each cluster, and remove empty clusters.
func (state *State) GarbageCollect(now time.Time) (removedClusters, removedAffiliates int) {
state.clusters.Range(func(key string, cluster *Cluster) bool {
state.clusters.Enumerate(func(key string, cluster *Cluster) bool {
ra, empty := cluster.GarbageCollect(now)
removedAffiliates += ra
if empty {
state.clusters.Delete(key)
state.clusters.CompareAndDelete(key, cluster)
state.logger.Debug("cluster removed", zap.String("cluster_id", key))
removedClusters++
@@ -137,7 +138,7 @@ func (state *State) RunGC(ctx context.Context, logger *zap.Logger, interval time
}
func (state *State) stats() (clusters, affiliates, endpoints, subscriptions int) {
state.clusters.Range(func(_ string, cluster *Cluster) bool {
state.clusters.Enumerate(func(_ string, cluster *Cluster) bool {
clusters++
a, e, s := cluster.stats()