qryn/wasm_parts/vendor.diff

# This is a patch for vendor to update it to _vendor
#
# To apply this patch:
# STEP 1: Chdir to the source directory.
# STEP 2: Run the 'applypatch' program with this patch file as input.
#
# If you do not have 'applypatch', it is part of the 'makepatch' package
# that you can fetch from the Comprehensive Perl Archive Network:
# http://www.perl.com/CPAN/authors/Johan_Vromans/makepatch-x.y.tar.gz
# In the above URL, 'x' should be 2 or higher.
#
# To apply this patch without the use of 'applypatch':
# STEP 1: Chdir to the source directory.
# If you have a decent Bourne-type shell:
# STEP 2: Run the shell with this file as input.
# If you don't have such a shell, you may need to manually create/delete
# the files/directories as shown below.
# STEP 3: Run the 'patch' program with this file as input.
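#
# For example, a minimal sketch of STEPs 1-3 above (the relative path
# '../vendor.diff' is illustrative, not part of this patch; adjust it
# to wherever this file actually lives):
#
#     cd vendor                   # STEP 1: the source directory
#     sh ../vendor.diff           # STEP 2: create/delete files (commands below)
#     patch -p0 < ../vendor.diff  # STEP 3: apply the context diffs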
#
# These are the commands needed to create/delete files/directories:
#
mkdir 'github.com/pquerna/ffjson/inception'
chmod 0775 'github.com/pquerna/ffjson/inception'
mkdir 'github.com/pquerna/ffjson/shared'
chmod 0775 'github.com/pquerna/ffjson/shared'
rm -f 'github.com/prometheus/prometheus/util/teststorage/storage.go'
rm -f 'github.com/prometheus/prometheus/tsdb/wal/watcher.go'
rm -f 'github.com/prometheus/prometheus/tsdb/wal/wal.go'
rm -f 'github.com/prometheus/prometheus/tsdb/wal/reader.go'
rm -f 'github.com/prometheus/prometheus/tsdb/wal/live_reader.go'
rm -f 'github.com/prometheus/prometheus/tsdb/wal/checkpoint.go'
rm -f 'github.com/prometheus/prometheus/tsdb/wal.go'
rm -f 'github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go'
rm -f 'github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go'
rm -f 'github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go'
rm -f 'github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go'
rm -f 'github.com/prometheus/prometheus/tsdb/tsdbblockutil.go'
rm -f 'github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go'
rm -f 'github.com/prometheus/prometheus/tsdb/repair.go'
rm -f 'github.com/prometheus/prometheus/tsdb/record/record.go'
rm -f 'github.com/prometheus/prometheus/tsdb/querier.go'
rm -f 'github.com/prometheus/prometheus/tsdb/isolation.go'
rm -f 'github.com/prometheus/prometheus/tsdb/index/postingsstats.go'
rm -f 'github.com/prometheus/prometheus/tsdb/index/postings.go'
rm -f 'github.com/prometheus/prometheus/tsdb/index/index.go'
rm -f 'github.com/prometheus/prometheus/tsdb/head_wal.go'
rm -f 'github.com/prometheus/prometheus/tsdb/head_read.go'
rm -f 'github.com/prometheus/prometheus/tsdb/head_append.go'
rm -f 'github.com/prometheus/prometheus/tsdb/head.go'
rm -f 'github.com/prometheus/prometheus/tsdb/goversion/init.go'
rm -f 'github.com/prometheus/prometheus/tsdb/goversion/goversion.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/sync_linux.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/sync_darwin.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/sync.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate_other.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate_darwin.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_windows.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_unix.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_js.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_arm64.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_amd64.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_386.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/mmap.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/flock_windows.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/flock_unix.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/flock_solaris.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/flock_plan9.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/flock.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/dir_windows.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/dir_unix.go'
rm -f 'github.com/prometheus/prometheus/tsdb/fileutil/dir.go'
rm -f 'github.com/prometheus/prometheus/tsdb/exemplar.go'
rm -f 'github.com/prometheus/prometheus/tsdb/errors/errors.go'
rm -f 'github.com/prometheus/prometheus/tsdb/encoding/encoding.go'
rm -f 'github.com/prometheus/prometheus/tsdb/db.go'
rm -f 'github.com/prometheus/prometheus/tsdb/compact.go'
rm -f 'github.com/prometheus/prometheus/tsdb/chunks/queue.go'
rm -f 'github.com/prometheus/prometheus/tsdb/chunks/head_chunks_windows.go'
rm -f 'github.com/prometheus/prometheus/tsdb/chunks/head_chunks_other.go'
rm -f 'github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go'
rm -f 'github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go'
rm -f 'github.com/prometheus/prometheus/tsdb/blockwriter.go'
rm -f 'github.com/prometheus/prometheus/tsdb/block.go'
rm -f 'github.com/prometheus/prometheus/tsdb/README.md'
rm -f 'github.com/prometheus/prometheus/tsdb/CHANGELOG.md'
rm -f 'github.com/prometheus/prometheus/tsdb/.gitignore'
rm -f 'github.com/prometheus/prometheus/storage/series.go'
rm -f 'github.com/prometheus/prometheus/storage/secondary.go'
rm -f 'github.com/prometheus/prometheus/storage/merge.go'
rm -f 'github.com/prometheus/prometheus/storage/lazy.go'
rm -f 'github.com/prometheus/prometheus/storage/fanout.go'
rm -f 'github.com/prometheus/prometheus/promql/test.go'
rm -f 'github.com/prometheus/prometheus/promql/query_logger.go'
touch 'github.com/pquerna/ffjson/inception/decoder.go'
chmod 0664 'github.com/pquerna/ffjson/inception/decoder.go'
touch 'github.com/pquerna/ffjson/inception/decoder_tpl.go'
chmod 0664 'github.com/pquerna/ffjson/inception/decoder_tpl.go'
touch 'github.com/pquerna/ffjson/inception/encoder.go'
chmod 0664 'github.com/pquerna/ffjson/inception/encoder.go'
touch 'github.com/pquerna/ffjson/inception/encoder_tpl.go'
chmod 0664 'github.com/pquerna/ffjson/inception/encoder_tpl.go'
touch 'github.com/pquerna/ffjson/inception/inception.go'
chmod 0664 'github.com/pquerna/ffjson/inception/inception.go'
touch 'github.com/pquerna/ffjson/inception/reflect.go'
chmod 0664 'github.com/pquerna/ffjson/inception/reflect.go'
touch 'github.com/pquerna/ffjson/inception/tags.go'
chmod 0664 'github.com/pquerna/ffjson/inception/tags.go'
touch 'github.com/pquerna/ffjson/inception/template.go'
chmod 0664 'github.com/pquerna/ffjson/inception/template.go'
touch 'github.com/pquerna/ffjson/inception/writerstack.go'
chmod 0664 'github.com/pquerna/ffjson/inception/writerstack.go'
touch 'github.com/pquerna/ffjson/shared/options.go'
chmod 0664 'github.com/pquerna/ffjson/shared/options.go'
rmdir 'github.com/prometheus/prometheus/util/teststorage'
rmdir 'github.com/prometheus/prometheus/tsdb/wal'
rmdir 'github.com/prometheus/prometheus/tsdb/tsdbutil'
rmdir 'github.com/prometheus/prometheus/tsdb/tombstones'
rmdir 'github.com/prometheus/prometheus/tsdb/record'
rmdir 'github.com/prometheus/prometheus/tsdb/index'
rmdir 'github.com/prometheus/prometheus/tsdb/goversion'
rmdir 'github.com/prometheus/prometheus/tsdb/fileutil'
rmdir 'github.com/prometheus/prometheus/tsdb/errors'
rmdir 'github.com/prometheus/prometheus/tsdb/encoding'
#
# This command terminates the shell and need not be executed manually.
exit
#
#### End of Preamble ####
#### Patch data follows ####
diff -c 'vendor/github.com/alecthomas/participle/v2/validate.go' '_vendor/github.com/alecthomas/participle/v2/validate.go'
Index: ./github.com/alecthomas/participle/v2/validate.go
*** ./github.com/alecthomas/participle/v2/validate.go Mon Mar 11 19:34:50 2024
--- ./github.com/alecthomas/participle/v2/validate.go Thu Oct 26 15:52:53 2023
***************
*** 42,48 ****
case *sequence:
if !n.head {
! panic("done")
}
}
if seen[n] {
--- 42,48 ----
case *sequence:
if !n.head {
! return nil
}
}
if seen[n] {
diff -c 'vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go' '_vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go'
Index: ./github.com/aws/aws-sdk-go/aws/defaults/defaults.go
*** ./github.com/aws/aws-sdk-go/aws/defaults/defaults.go Mon Mar 11 19:34:50 2024
--- ./github.com/aws/aws-sdk-go/aws/defaults/defaults.go Thu Oct 26 15:21:07 2023
***************
*** 132,138 ****
return ec2RoleProvider(cfg, handlers)
}
! var lookupHostFn = net.LookupHost
func isLoopbackHost(host string) (bool, error) {
ip := net.ParseIP(host)
--- 132,140 ----
return ec2RoleProvider(cfg, handlers)
}
! var lookupHostFn = func(string) ([]string, error) {
! return nil, nil
! }
func isLoopbackHost(host string) (bool, error) {
ip := net.ParseIP(host)
diff -c 'vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go' '_vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go'
Index: ./github.com/aws/aws-sdk-go/aws/request/retryer.go
*** ./github.com/aws/aws-sdk-go/aws/request/retryer.go Mon Mar 11 19:34:50 2024
--- ./github.com/aws/aws-sdk-go/aws/request/retryer.go Thu Oct 26 15:21:07 2023
***************
*** 1,7 ****
package request
import (
- "net"
"net/url"
"strings"
"time"
--- 1,6 ----
***************
*** 200,208 ****
return shouldRetryError(err.Err)
case temporary:
- if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
- return true
- }
// If the error is temporary, we want to allow continuation of the
// retry process
return err.Temporary() || isErrConnectionReset(origErr)
--- 199,204 ----
diff -c 'vendor/github.com/davecgh/go-spew/spew/bypass.go' '_vendor/github.com/davecgh/go-spew/spew/bypass.go'
Index: ./github.com/davecgh/go-spew/spew/bypass.go
*** ./github.com/davecgh/go-spew/spew/bypass.go Mon Mar 11 19:34:50 2024
--- ./github.com/davecgh/go-spew/spew/bypass.go Thu Oct 26 15:21:07 2023
***************
*** 68,80 ****
addr: 1 << 8,
}}
! var flagValOffset = func() uintptr {
! field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
! if !ok {
! panic("reflect.Value has no flag field")
! }
! return field.Offset
! }()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
--- 68,74 ----
addr: 1 << 8,
}}
! var flagValOffset = (uintptr)(0)
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
***************
*** 102,145 ****
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
- func init() {
- field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
- if !ok {
- panic("reflect.Value has no flag field")
- }
- if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
- panic("reflect.Value flag field has changed kind")
- }
- type t0 int
- var t struct {
- A t0
- // t0 will have flagEmbedRO set.
- t0
- // a will have flagStickyRO set
- a t0
- }
- vA := reflect.ValueOf(t).FieldByName("A")
- va := reflect.ValueOf(t).FieldByName("a")
- vt0 := reflect.ValueOf(t).FieldByName("t0")
-
- // Infer flagRO from the difference between the flags
- // for the (otherwise identical) fields in t.
- flagPublic := *flagField(&vA)
- flagWithRO := *flagField(&va) | *flagField(&vt0)
- flagRO = flagPublic ^ flagWithRO
-
- // Infer flagAddr from the difference between a value
- // taken from a pointer and not.
- vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
- flagNoPtr := *flagField(&vA)
- flagPtr := *flagField(&vPtrA)
- flagAddr = flagNoPtr ^ flagPtr
-
- // Check that the inferred flags tally with one of the known versions.
- for _, f := range okFlags {
- if flagRO == f.ro && flagAddr == f.addr {
- return
- }
- }
- panic("reflect.Value read-only flag has changed semantics")
- }
--- 96,98 ----
diff -c 'vendor/github.com/edsrzf/mmap-go/mmap.go' '_vendor/github.com/edsrzf/mmap-go/mmap.go'
Index: ./github.com/edsrzf/mmap-go/mmap.go
*** ./github.com/edsrzf/mmap-go/mmap.go Mon Mar 11 19:34:50 2024
--- ./github.com/edsrzf/mmap-go/mmap.go Thu Oct 26 15:21:07 2023
***************
*** 15,21 ****
package mmap
import (
- "errors"
"os"
"reflect"
"unsafe"
--- 15,20 ----
***************
*** 54,80 ****
// If length < 0, the entire file will be mapped.
// If ANON is set in flags, f is ignored.
func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
! if offset%int64(os.Getpagesize()) != 0 {
! return nil, errors.New("offset parameter must be a multiple of the system's page size")
! }
!
! var fd uintptr
! if flags&ANON == 0 {
! fd = uintptr(f.Fd())
! if length < 0 {
! fi, err := f.Stat()
! if err != nil {
! return nil, err
! }
! length = int(fi.Size())
! }
! } else {
! if length <= 0 {
! return nil, errors.New("anonymous mapping requires non-zero length")
! }
! fd = ^uintptr(0)
! }
! return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
}
func (m *MMap) header() *reflect.SliceHeader {
--- 53,59 ----
// If length < 0, the entire file will be mapped.
// If ANON is set in flags, f is ignored.
func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
! return nil, nil
}
func (m *MMap) header() *reflect.SliceHeader {
***************
*** 89,107 ****
// Lock keeps the mapped region in physical memory, ensuring that it will not be
// swapped out.
func (m MMap) Lock() error {
! return m.lock()
}
// Unlock reverses the effect of Lock, allowing the mapped region to potentially
// be swapped out.
// If m is already unlocked, an error will result.
func (m MMap) Unlock() error {
! return m.unlock()
}
// Flush synchronizes the mapping's contents to the file's contents on disk.
func (m MMap) Flush() error {
! return m.flush()
}
// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
--- 68,86 ----
// Lock keeps the mapped region in physical memory, ensuring that it will not be
// swapped out.
func (m MMap) Lock() error {
! return nil
}
// Unlock reverses the effect of Lock, allowing the mapped region to potentially
// be swapped out.
// If m is already unlocked, an error will result.
func (m MMap) Unlock() error {
! return nil
}
// Flush synchronizes the mapping's contents to the file's contents on disk.
func (m MMap) Flush() error {
! return nil
}
// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
***************
*** 111,117 ****
// Unmap should only be called on the slice value that was originally returned from
// a call to Map. Calling Unmap on a derived slice may cause errors.
func (m *MMap) Unmap() error {
! err := m.unmap()
! *m = nil
! return err
}
--- 90,94 ----
// Unmap should only be called on the slice value that was originally returned from
// a call to Map. Calling Unmap on a derived slice may cause errors.
func (m *MMap) Unmap() error {
! return nil
}
diff -c 'vendor/github.com/grafana/regexp/backtrack.go' '_vendor/github.com/grafana/regexp/backtrack.go'
Index: ./github.com/grafana/regexp/backtrack.go
*** ./github.com/grafana/regexp/backtrack.go Mon Mar 11 19:34:50 2024
--- ./github.com/grafana/regexp/backtrack.go Mon Mar 11 11:17:18 2024
***************
*** 16,22 ****
import (
"regexp/syntax"
- "sync"
)
// A job is an entry on the backtracker's job stack. It holds
--- 16,21 ----
***************
*** 44,50 ****
inputs inputs
}
! var bitStatePool sync.Pool
func newBitState() *bitState {
b, ok := bitStatePool.Get().(*bitState)
--- 43,53 ----
inputs inputs
}
! type fakePool[T any] struct{}
! func (f fakePool[T]) Get() interface{} { return new(T) }
! func (f fakePool[T]) Put(x any) {}
!
! var bitStatePool fakePool[bitState]
func newBitState() *bitState {
b, ok := bitStatePool.Get().(*bitState)
diff -c 'vendor/github.com/grafana/regexp/exec.go' '_vendor/github.com/grafana/regexp/exec.go'
Index: ./github.com/grafana/regexp/exec.go
*** ./github.com/grafana/regexp/exec.go Mon Mar 11 19:34:50 2024
--- ./github.com/grafana/regexp/exec.go Mon Mar 11 11:17:07 2024
***************
*** 7,13 ****
import (
"io"
"regexp/syntax"
- "sync"
)
// A queue is a 'sparse array' holding pending threads of execution.
--- 7,12 ----
***************
*** 377,384 ****
inputs inputs
matchcap []int
}
!
! var onePassPool sync.Pool
func newOnePassMachine() *onePassMachine {
m, ok := onePassPool.Get().(*onePassMachine)
--- 376,382 ----
inputs inputs
matchcap []int
}
! var onePassPool fakePool[onePassMachine]
func newOnePassMachine() *onePassMachine {
m, ok := onePassPool.Get().(*onePassMachine)
diff -c 'vendor/github.com/grafana/regexp/regexp.go' '_vendor/github.com/grafana/regexp/regexp.go'
Index: ./github.com/grafana/regexp/regexp.go
*** ./github.com/grafana/regexp/regexp.go Mon Mar 11 19:34:50 2024
--- ./github.com/grafana/regexp/regexp.go Mon Mar 11 18:14:29 2024
***************
*** 71,77 ****
"regexp/syntax"
"strconv"
"strings"
- "sync"
"unicode"
"unicode/utf8"
)
--- 71,76 ----
***************
*** 225,231 ****
// The final matchPool is a catch-all for very large queues.
var (
matchSize = [...]int{128, 512, 2048, 16384, 0}
! matchPool [len(matchSize)]sync.Pool
)
// get returns a machine to use for matching re.
--- 224,230 ----
// The final matchPool is a catch-all for very large queues.
var (
matchSize = [...]int{128, 512, 2048, 16384, 0}
! matchPool [len(matchSize)]fakePool[machine]
)
// get returns a machine to use for matching re.
diff -c 'vendor/github.com/mwitkow/go-conntrack/dialer_reporter.go' '_vendor/github.com/mwitkow/go-conntrack/dialer_reporter.go'
Index: ./github.com/mwitkow/go-conntrack/dialer_reporter.go
*** ./github.com/mwitkow/go-conntrack/dialer_reporter.go Mon Mar 11 19:34:50 2024
--- ./github.com/mwitkow/go-conntrack/dialer_reporter.go Thu Oct 26 15:21:07 2023
***************
*** 87,95 ****
func reportDialerConnFailed(dialerName string, err error) {
if netErr, ok := err.(*net.OpError); ok {
switch nestErr := netErr.Err.(type) {
- case *net.DNSError:
- dialerConnFailedTotal.WithLabelValues(dialerName, string(failedResolution)).Inc()
- return
case *os.SyscallError:
if nestErr.Err == syscall.ECONNREFUSED {
dialerConnFailedTotal.WithLabelValues(dialerName, string(failedConnRefused)).Inc()
--- 87,92 ----
***************
*** 97,105 ****
dialerConnFailedTotal.WithLabelValues(dialerName, string(failedUnknown)).Inc()
return
}
- if netErr.Timeout() {
- dialerConnFailedTotal.WithLabelValues(dialerName, string(failedTimeout)).Inc()
- }
} else if err == context.Canceled || err == context.DeadlineExceeded {
dialerConnFailedTotal.WithLabelValues(dialerName, string(failedTimeout)).Inc()
return
--- 94,99 ----
diff -c 'vendor/github.com/mwitkow/go-conntrack/listener_wrapper.go' '_vendor/github.com/mwitkow/go-conntrack/listener_wrapper.go'
Index: ./github.com/mwitkow/go-conntrack/listener_wrapper.go
*** ./github.com/mwitkow/go-conntrack/listener_wrapper.go Mon Mar 11 19:34:50 2024
--- ./github.com/mwitkow/go-conntrack/listener_wrapper.go Thu Oct 26 15:21:07 2023
***************
*** 109,118 ****
if err != nil {
return nil, err
}
- if tcpConn, ok := conn.(*net.TCPConn); ok && ct.opts.tcpKeepAlive > 0 {
- tcpConn.SetKeepAlive(true)
- tcpConn.SetKeepAlivePeriod(ct.opts.tcpKeepAlive)
- }
return newServerConnTracker(conn, ct.opts), nil
}
--- 109,114 ----
diff -c 'vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go' '_vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go'
Index: ./github.com/pquerna/ffjson/fflib/v1/buffer_pool.go
*** ./github.com/pquerna/ffjson/fflib/v1/buffer_pool.go Mon Mar 11 19:34:50 2024
--- ./github.com/pquerna/ffjson/fflib/v1/buffer_pool.go Mon Mar 11 18:36:36 2024
***************
*** 8,17 ****
// Allocation pools for Buffers.
! import "sync"
! var pools [14]sync.Pool
! var pool64 *sync.Pool
func init() {
var i uint
--- 8,22 ----
// Allocation pools for Buffers.
! type fakePool struct {
! New func() any
! }
! func (f *fakePool) Get() any { return f.New() }
! func (f *fakePool) Put(x any) {}
!
! var pools [14]fakePool
! var pool64 *fakePool
func init() {
var i uint
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/decoder.go'
Index: ./github.com/pquerna/ffjson/inception/decoder.go
*** ./github.com/pquerna/ffjson/inception/decoder.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/decoder.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,323 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/pquerna/ffjson/shared"
+ )
+
+ var validValues []string = []string{
+ "FFTok_left_brace",
+ "FFTok_left_bracket",
+ "FFTok_integer",
+ "FFTok_double",
+ "FFTok_string",
+ "FFTok_bool",
+ "FFTok_null",
+ }
+
+ func CreateUnmarshalJSON(ic *Inception, si *StructInfo) error {
+ out := ""
+ ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
+ if len(si.Fields) > 0 {
+ ic.OutputImports[`"bytes"`] = true
+ }
+ ic.OutputImports[`"fmt"`] = true
+
+ out += tplStr(decodeTpl["header"], header{
+ IC: ic,
+ SI: si,
+ })
+
+ out += tplStr(decodeTpl["ujFunc"], ujFunc{
+ SI: si,
+ IC: ic,
+ ValidValues: validValues,
+ ResetFields: ic.ResetFields,
+ })
+
+ ic.OutputFuncs = append(ic.OutputFuncs, out)
+
+ return nil
+ }
+
+ func handleField(ic *Inception, name string, typ reflect.Type, ptr bool, quoted bool) string {
+ return handleFieldAddr(ic, name, false, typ, ptr, quoted)
+ }
+
+ func handleFieldAddr(ic *Inception, name string, takeAddr bool, typ reflect.Type, ptr bool, quoted bool) string {
+ out := fmt.Sprintf("/* handler: %s type=%v kind=%v quoted=%t*/\n", name, typ, typ.Kind(), quoted)
+
+ umlx := typ.Implements(unmarshalFasterType) || typeInInception(ic, typ, shared.MustDecoder)
+ umlx = umlx || reflect.PtrTo(typ).Implements(unmarshalFasterType)
+
+ umlstd := typ.Implements(unmarshalerType) || reflect.PtrTo(typ).Implements(unmarshalerType)
+
+ out += tplStr(decodeTpl["handleUnmarshaler"], handleUnmarshaler{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ Ptr: reflect.Ptr,
+ TakeAddr: takeAddr || ptr,
+ UnmarshalJSONFFLexer: umlx,
+ Unmarshaler: umlstd,
+ })
+
+ if umlx || umlstd {
+ return out
+ }
+
+ // TODO(pquerna): generic handling of token type mismatching struct type
+ switch typ.Kind() {
+ case reflect.Int,
+ reflect.Int8,
+ reflect.Int16,
+ reflect.Int32,
+ reflect.Int64:
+
+ allowed := buildTokens(quoted, "FFTok_string", "FFTok_integer", "FFTok_null")
+ out += getAllowTokens(typ.Name(), allowed...)
+
+ out += getNumberHandler(ic, name, takeAddr || ptr, typ, "ParseInt")
+
+ case reflect.Uint,
+ reflect.Uint8,
+ reflect.Uint16,
+ reflect.Uint32,
+ reflect.Uint64:
+
+ allowed := buildTokens(quoted, "FFTok_string", "FFTok_integer", "FFTok_null")
+ out += getAllowTokens(typ.Name(), allowed...)
+
+ out += getNumberHandler(ic, name, takeAddr || ptr, typ, "ParseUint")
+
+ case reflect.Float32,
+ reflect.Float64:
+
+ allowed := buildTokens(quoted, "FFTok_string", "FFTok_double", "FFTok_integer", "FFTok_null")
+ out += getAllowTokens(typ.Name(), allowed...)
+
+ out += getNumberHandler(ic, name, takeAddr || ptr, typ, "ParseFloat")
+
+ case reflect.Bool:
+ ic.OutputImports[`"bytes"`] = true
+ ic.OutputImports[`"errors"`] = true
+
+ allowed := buildTokens(quoted, "FFTok_string", "FFTok_bool", "FFTok_null")
+ out += getAllowTokens(typ.Name(), allowed...)
+
+ out += tplStr(decodeTpl["handleBool"], handleBool{
+ Name: name,
+ Typ: typ,
+ TakeAddr: takeAddr || ptr,
+ })
+
+ case reflect.Ptr:
+ out += tplStr(decodeTpl["handlePtr"], handlePtr{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ Quoted: quoted,
+ })
+
+ case reflect.Array,
+ reflect.Slice:
+ out += getArrayHandler(ic, name, typ, ptr)
+
+ case reflect.String:
+ // Is it a json.Number?
+ if typ.PkgPath() == "encoding/json" && typ.Name() == "Number" {
+ // Fall back to json package to rely on the valid number check.
+ // See: https://github.com/golang/go/blob/f05c3aa24d815cd3869153750c9875e35fc48a6e/src/encoding/json/decode.go#L897
+ ic.OutputImports[`"encoding/json"`] = true
+ out += tplStr(decodeTpl["handleFallback"], handleFallback{
+ Name: name,
+ Typ: typ,
+ Kind: typ.Kind(),
+ })
+ } else {
+ out += tplStr(decodeTpl["handleString"], handleString{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ TakeAddr: takeAddr || ptr,
+ Quoted: quoted,
+ })
+ }
+ case reflect.Interface:
+ ic.OutputImports[`"encoding/json"`] = true
+ out += tplStr(decodeTpl["handleFallback"], handleFallback{
+ Name: name,
+ Typ: typ,
+ Kind: typ.Kind(),
+ })
+ case reflect.Map:
+ out += tplStr(decodeTpl["handleObject"], handleObject{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ Ptr: reflect.Ptr,
+ TakeAddr: takeAddr || ptr,
+ })
+ default:
+ ic.OutputImports[`"encoding/json"`] = true
+ out += tplStr(decodeTpl["handleFallback"], handleFallback{
+ Name: name,
+ Typ: typ,
+ Kind: typ.Kind(),
+ })
+ }
+
+ return out
+ }
+
+ func getArrayHandler(ic *Inception, name string, typ reflect.Type, ptr bool) string {
+ if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
+ ic.OutputImports[`"encoding/base64"`] = true
+ useReflectToSet := false
+ if typ.Elem().Name() != "byte" {
+ ic.OutputImports[`"reflect"`] = true
+ useReflectToSet = true
+ }
+
+ return tplStr(decodeTpl["handleByteSlice"], handleArray{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ Ptr: reflect.Ptr,
+ UseReflectToSet: useReflectToSet,
+ })
+ }
+
+ if typ.Elem().Kind() == reflect.Struct && typ.Elem().Name() != "" {
+ goto sliceOrArray
+ }
+
+ if (typ.Elem().Kind() == reflect.Struct || typ.Elem().Kind() == reflect.Map) ||
+ typ.Elem().Kind() == reflect.Array || typ.Elem().Kind() == reflect.Slice &&
+ typ.Elem().Name() == "" {
+ ic.OutputImports[`"encoding/json"`] = true
+
+ return tplStr(decodeTpl["handleFallback"], handleFallback{
+ Name: name,
+ Typ: typ,
+ Kind: typ.Kind(),
+ })
+ }
+
+ sliceOrArray:
+
+ if typ.Kind() == reflect.Array {
+ return tplStr(decodeTpl["handleArray"], handleArray{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ IsPtr: ptr,
+ Ptr: reflect.Ptr,
+ })
+ }
+
+ return tplStr(decodeTpl["handleSlice"], handleArray{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ IsPtr: ptr,
+ Ptr: reflect.Ptr,
+ })
+ }
+
+ func getAllowTokens(name string, tokens ...string) string {
+ return tplStr(decodeTpl["allowTokens"], allowTokens{
+ Name: name,
+ Tokens: tokens,
+ })
+ }
+
+ func getNumberHandler(ic *Inception, name string, takeAddr bool, typ reflect.Type, parsefunc string) string {
+ return tplStr(decodeTpl["handlerNumeric"], handlerNumeric{
+ IC: ic,
+ Name: name,
+ ParseFunc: parsefunc,
+ TakeAddr: takeAddr,
+ Typ: typ,
+ })
+ }
+
+ func getNumberSize(typ reflect.Type) string {
+ return fmt.Sprintf("%d", typ.Bits())
+ }
+
+ func getType(ic *Inception, name string, typ reflect.Type) string {
+ s := typ.Name()
+
+ if typ.PkgPath() != "" && typ.PkgPath() != ic.PackagePath {
+ path := removeVendor(typ.PkgPath())
+ ic.OutputImports[`"`+path+`"`] = true
+ s = typ.String()
+ }
+
+ if s == "" {
+ return typ.String()
+ }
+
+ return s
+ }
+
+ // removeVendor removes everything before and including a '/vendor/'
+ // substring in the package path.
+ // This is needed because that full path can't be used in the
+ // import statement.
+ func removeVendor(path string) string {
+ i := strings.Index(path, "/vendor/")
+ if i == -1 {
+ return path
+ }
+ return path[i+8:]
+ }
+
+ func buildTokens(containsOptional bool, optional string, required ...string) []string {
+ if containsOptional {
+ return append(required, optional)
+ }
+
+ return required
+ }
+
+ func unquoteField(quoted bool) string {
+ // The outer quote of a string is already stripped out by
+ // the lexer. We need to check if the inner string is also
+ // quoted. If so, we will decode it as json string. If decoding
+ // fails, we will use the original string
+ if quoted {
+ return `
+ unquoted, ok := fflib.UnquoteBytes(outBuf)
+ if ok {
+ outBuf = unquoted
+ }
+ `
+ }
+ return ""
+ }
+
+ func getTmpVarFor(name string) string {
+ return "tmp" + strings.Replace(strings.Title(name), ".", "", -1)
+ }
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/decoder_tpl.go'
Index: ./github.com/pquerna/ffjson/inception/decoder_tpl.go
*** ./github.com/pquerna/ffjson/inception/decoder_tpl.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/decoder_tpl.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,773 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ "reflect"
+ "strconv"
+ "text/template"
+ )
+
+ var decodeTpl map[string]*template.Template
+
+ func init() {
+ decodeTpl = make(map[string]*template.Template)
+
+ funcs := map[string]string{
+ "handlerNumeric": handlerNumericTxt,
+ "allowTokens": allowTokensTxt,
+ "handleFallback": handleFallbackTxt,
+ "handleString": handleStringTxt,
+ "handleObject": handleObjectTxt,
+ "handleArray": handleArrayTxt,
+ "handleSlice": handleSliceTxt,
+ "handleByteSlice": handleByteSliceTxt,
+ "handleBool": handleBoolTxt,
+ "handlePtr": handlePtrTxt,
+ "header": headerTxt,
+ "ujFunc": ujFuncTxt,
+ "handleUnmarshaler": handleUnmarshalerTxt,
+ }
+
+ tplFuncs := template.FuncMap{
+ "getAllowTokens": getAllowTokens,
+ "getNumberSize": getNumberSize,
+ "getType": getType,
+ "handleField": handleField,
+ "handleFieldAddr": handleFieldAddr,
+ "unquoteField": unquoteField,
+ "getTmpVarFor": getTmpVarFor,
+ }
+
+ for k, v := range funcs {
+ decodeTpl[k] = template.Must(template.New(k).Funcs(tplFuncs).Parse(v))
+ }
+ }
+
+ type handlerNumeric struct {
+ IC *Inception
+ Name string
+ ParseFunc string
+ Typ reflect.Type
+ TakeAddr bool
+ }
+
+ var handlerNumericTxt = `
+ {
+ {{$ic := .IC}}
+
+ if tok == fflib.FFTok_null {
+ {{if eq .TakeAddr true}}
+ {{.Name}} = nil
+ {{end}}
+ } else {
+ {{if eq .ParseFunc "ParseFloat" }}
+ tval, err := fflib.{{ .ParseFunc}}(fs.Output.Bytes(), {{getNumberSize .Typ}})
+ {{else}}
+ tval, err := fflib.{{ .ParseFunc}}(fs.Output.Bytes(), 10, {{getNumberSize .Typ}})
+ {{end}}
+
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ {{if eq .TakeAddr true}}
+ ttypval := {{getType $ic .Name .Typ}}(tval)
+ {{.Name}} = &ttypval
+ {{else}}
+ {{.Name}} = {{getType $ic .Name .Typ}}(tval)
+ {{end}}
+ }
+ }
+ `
+
+ type allowTokens struct {
+ Name string
+ Tokens []string
+ }
+
+ var allowTokensTxt = `
+ {
+ if {{range $index, $element := .Tokens}}{{if ne $index 0 }}&&{{end}} tok != fflib.{{$element}}{{end}} {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for {{.Name}}", tok))
+ }
+ }
+ `
+
+ type handleFallback struct {
+ Name string
+ Typ reflect.Type
+ Kind reflect.Kind
+ }
+
+ var handleFallbackTxt = `
+ {
+ /* Falling back. type={{printf "%v" .Typ}} kind={{printf "%v" .Kind}} */
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ err = json.Unmarshal(tbuf, &{{.Name}})
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ `
+
+ type handleString struct {
+ IC *Inception
+ Name string
+ Typ reflect.Type
+ TakeAddr bool
+ Quoted bool
+ }
+
+ var handleStringTxt = `
+ {
+ {{$ic := .IC}}
+
+ {{getAllowTokens .Typ.Name "FFTok_string" "FFTok_null"}}
+ if tok == fflib.FFTok_null {
+ {{if eq .TakeAddr true}}
+ {{.Name}} = nil
+ {{end}}
+ } else {
+ {{if eq .TakeAddr true}}
+ var tval {{getType $ic .Name .Typ}}
+ outBuf := fs.Output.Bytes()
+ {{unquoteField .Quoted}}
+ tval = {{getType $ic .Name .Typ}}(string(outBuf))
+ {{.Name}} = &tval
+ {{else}}
+ outBuf := fs.Output.Bytes()
+ {{unquoteField .Quoted}}
+ {{.Name}} = {{getType $ic .Name .Typ}}(string(outBuf))
+ {{end}}
+ }
+ }
+ `
+
+ type handleObject struct {
+ IC *Inception
+ Name string
+ Typ reflect.Type
+ Ptr reflect.Kind
+ TakeAddr bool
+ }
+
+ var handleObjectTxt = `
+ {
+ {{$ic := .IC}}
+ {{getAllowTokens .Typ.Name "FFTok_left_bracket" "FFTok_null"}}
+ if tok == fflib.FFTok_null {
+ {{.Name}} = nil
+ } else {
+
+ {{if eq .TakeAddr true}}
+ {{if eq .Typ.Elem.Kind .Ptr }}
+ {{if eq .Typ.Key.Kind .Ptr }}
+ var tval = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
+ {{else}}
+ var tval = make(map[{{getType $ic .Name .Typ.Key}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
+ {{end}}
+ {{else}}
+ {{if eq .Typ.Key.Kind .Ptr }}
+ var tval = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]{{getType $ic .Name .Typ.Elem}}, 0)
+ {{else}}
+ var tval = make(map[{{getType $ic .Name .Typ.Key}}]{{getType $ic .Name .Typ.Elem}}, 0)
+ {{end}}
+ {{end}}
+ {{else}}
+ {{if eq .Typ.Elem.Kind .Ptr }}
+ {{if eq .Typ.Key.Kind .Ptr }}
+ {{.Name}} = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
+ {{else}}
+ {{.Name}} = make(map[{{getType $ic .Name .Typ.Key}}]*{{getType $ic .Name .Typ.Elem.Elem}}, 0)
+ {{end}}
+ {{else}}
+ {{if eq .Typ.Key.Kind .Ptr }}
+ {{.Name}} = make(map[*{{getType $ic .Name .Typ.Key.Elem}}]{{getType $ic .Name .Typ.Elem}}, 0)
+ {{else}}
+ {{.Name}} = make(map[{{getType $ic .Name .Typ.Key}}]{{getType $ic .Name .Typ.Elem}}, 0)
+ {{end}}
+ {{end}}
+ {{end}}
+
+ wantVal := true
+
+ for {
+ {{$keyPtr := false}}
+ {{if eq .Typ.Key.Kind .Ptr }}
+ {{$keyPtr := true}}
+ var k *{{getType $ic .Name .Typ.Key.Elem}}
+ {{else}}
+ var k {{getType $ic .Name .Typ.Key}}
+ {{end}}
+
+ {{$valPtr := false}}
+ {{$tmpVar := getTmpVarFor .Name}}
+ {{if eq .Typ.Elem.Kind .Ptr }}
+ {{$valPtr := true}}
+ var {{$tmpVar}} *{{getType $ic .Name .Typ.Elem.Elem}}
+ {{else}}
+ var {{$tmpVar}} {{getType $ic .Name .Typ.Elem}}
+ {{end}}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_bracket {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ {{handleField .IC "k" .Typ.Key $keyPtr false}}
+
+ // Expect ':' after key
+ tok = fs.Scan()
+ if tok != fflib.FFTok_colon {
+ return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok))
+ }
+
+ tok = fs.Scan()
+ {{handleField .IC $tmpVar .Typ.Elem $valPtr false}}
+
+ {{if eq .TakeAddr true}}
+ tval[k] = {{$tmpVar}}
+ {{else}}
+ {{.Name}}[k] = {{$tmpVar}}
+ {{end}}
+ wantVal = false
+ }
+
+ {{if eq .TakeAddr true}}
+ {{.Name}} = &tval
+ {{end}}
+ }
+ }
+ `
+
+ type handleArray struct {
+ IC *Inception
+ Name string
+ Typ reflect.Type
+ Ptr reflect.Kind
+ UseReflectToSet bool
+ IsPtr bool
+ }
+
+ var handleArrayTxt = `
+ {
+ {{$ic := .IC}}
+ {{getAllowTokens .Typ.Name "FFTok_left_brace" "FFTok_null"}}
+ {{if eq .Typ.Elem.Kind .Ptr}}
+ {{.Name}} = [{{.Typ.Len}}]*{{getType $ic .Name .Typ.Elem.Elem}}{}
+ {{else}}
+ {{.Name}} = [{{.Typ.Len}}]{{getType $ic .Name .Typ.Elem}}{}
+ {{end}}
+ if tok != fflib.FFTok_null {
+ wantVal := true
+
+ idx := 0
+ for {
+ {{$ptr := false}}
+ {{$tmpVar := getTmpVarFor .Name}}
+ {{if eq .Typ.Elem.Kind .Ptr }}
+ {{$ptr := true}}
+ var {{$tmpVar}} *{{getType $ic .Name .Typ.Elem.Elem}}
+ {{else}}
+ var {{$tmpVar}} {{getType $ic .Name .Typ.Elem}}
+ {{end}}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ {{handleField .IC $tmpVar .Typ.Elem $ptr false}}
+
+ // Standard json.Unmarshal ignores elements out of array bounds,
+ // that is what we do as well.
+ if idx < {{.Typ.Len}} {
+ {{.Name}}[idx] = {{$tmpVar}}
+ idx++
+ }
+
+ wantVal = false
+ }
+ }
+ }
+ `
+
+ var handleSliceTxt = `
+ {
+ {{$ic := .IC}}
+ {{getAllowTokens .Typ.Name "FFTok_left_brace" "FFTok_null"}}
+ if tok == fflib.FFTok_null {
+ {{.Name}} = nil
+ } else {
+ {{if eq .Typ.Elem.Kind .Ptr }}
+ {{if eq .IsPtr true}}
+ {{.Name}} = &[]*{{getType $ic .Name .Typ.Elem.Elem}}{}
+ {{else}}
+ {{.Name}} = []*{{getType $ic .Name .Typ.Elem.Elem}}{}
+ {{end}}
+ {{else}}
+ {{if eq .IsPtr true}}
+ {{.Name}} = &[]{{getType $ic .Name .Typ.Elem}}{}
+ {{else}}
+ {{.Name}} = []{{getType $ic .Name .Typ.Elem}}{}
+ {{end}}
+ {{end}}
+
+ wantVal := true
+
+ for {
+ {{$ptr := false}}
+ {{$tmpVar := getTmpVarFor .Name}}
+ {{if eq .Typ.Elem.Kind .Ptr }}
+ {{$ptr := true}}
+ var {{$tmpVar}} *{{getType $ic .Name .Typ.Elem.Elem}}
+ {{else}}
+ var {{$tmpVar}} {{getType $ic .Name .Typ.Elem}}
+ {{end}}
+
+ tok = fs.Scan()
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+ if tok == fflib.FFTok_right_brace {
+ break
+ }
+
+ if tok == fflib.FFTok_comma {
+ if wantVal == true {
+ // TODO(pquerna): this isn't an ideal error message, this handles
+ // things like [,,,] as an array value.
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ }
+ continue
+ } else {
+ wantVal = true
+ }
+
+ {{handleField .IC $tmpVar .Typ.Elem $ptr false}}
+ {{if eq .IsPtr true}}
+ *{{.Name}} = append(*{{.Name}}, {{$tmpVar}})
+ {{else}}
+ {{.Name}} = append({{.Name}}, {{$tmpVar}})
+ {{end}}
+ wantVal = false
+ }
+ }
+ }
+ `
+
+ var handleByteSliceTxt = `
+ {
+ {{getAllowTokens .Typ.Name "FFTok_string" "FFTok_null"}}
+ if tok == fflib.FFTok_null {
+ {{.Name}} = nil
+ } else {
+ b := make([]byte, base64.StdEncoding.DecodedLen(fs.Output.Len()))
+ n, err := base64.StdEncoding.Decode(b, fs.Output.Bytes())
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ {{if eq .UseReflectToSet true}}
+ v := reflect.ValueOf(&{{.Name}}).Elem()
+ v.SetBytes(b[0:n])
+ {{else}}
+ {{.Name}} = append([]byte(), b[0:n]...)
+ {{end}}
+ }
+ }
+ `
+
+ type handleBool struct {
+ Name string
+ Typ reflect.Type
+ TakeAddr bool
+ }
+
+ var handleBoolTxt = `
+ {
+ if tok == fflib.FFTok_null {
+ {{if eq .TakeAddr true}}
+ {{.Name}} = nil
+ {{end}}
+ } else {
+ tmpb := fs.Output.Bytes()
+
+ {{if eq .TakeAddr true}}
+ var tval bool
+ {{end}}
+
+ if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {
+ {{if eq .TakeAddr true}}
+ tval = true
+ {{else}}
+ {{.Name}} = true
+ {{end}}
+ } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {
+ {{if eq .TakeAddr true}}
+ tval = false
+ {{else}}
+ {{.Name}} = false
+ {{end}}
+ } else {
+ err = errors.New("unexpected bytes for true/false value")
+ return fs.WrapErr(err)
+ }
+
+ {{if eq .TakeAddr true}}
+ {{.Name}} = &tval
+ {{end}}
+ }
+ }
+ `
+
+ type handlePtr struct {
+ IC *Inception
+ Name string
+ Typ reflect.Type
+ Quoted bool
+ }
+
+ var handlePtrTxt = `
+ {
+ {{$ic := .IC}}
+
+ if tok == fflib.FFTok_null {
+ {{.Name}} = nil
+ } else {
+ if {{.Name}} == nil {
+ {{.Name}} = new({{getType $ic .Typ.Elem.Name .Typ.Elem}})
+ }
+
+ {{handleFieldAddr .IC .Name true .Typ.Elem false .Quoted}}
+ }
+ }
+ `
+
+ type header struct {
+ IC *Inception
+ SI *StructInfo
+ }
+
+ var headerTxt = `
+ const (
+ ffjt{{.SI.Name}}base = iota
+ ffjt{{.SI.Name}}nosuchkey
+ {{with $si := .SI}}
+ {{range $index, $field := $si.Fields}}
+ {{if ne $field.JsonName "-"}}
+ ffjt{{$si.Name}}{{$field.Name}}
+ {{end}}
+ {{end}}
+ {{end}}
+ )
+
+ {{with $si := .SI}}
+ {{range $index, $field := $si.Fields}}
+ {{if ne $field.JsonName "-"}}
+ var ffjKey{{$si.Name}}{{$field.Name}} = []byte({{$field.JsonName}})
+ {{end}}
+ {{end}}
+ {{end}}
+
+ `
+
+ type ujFunc struct {
+ IC *Inception
+ SI *StructInfo
+ ValidValues []string
+ ResetFields bool
+ }
+
+ var ujFuncTxt = `
+ {{$si := .SI}}
+ {{$ic := .IC}}
+
+ // UnmarshalJSON unmarshals json - template of ffjson
+ func (j *{{.SI.Name}}) UnmarshalJSON(input []byte) error {
+ fs := fflib.NewFFLexer(input)
+ return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start)
+ }
+
+ // UnmarshalJSONFFLexer fast json unmarshal - template ffjson
+ func (j *{{.SI.Name}}) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error {
+ var err error
+ currentKey := ffjt{{.SI.Name}}base
+ _ = currentKey
+ tok := fflib.FFTok_init
+ wantedTok := fflib.FFTok_init
+
+ {{if eq .ResetFields true}}
+ {{range $index, $field := $si.Fields}}
+ var ffjSet{{$si.Name}}{{$field.Name}} = false
+ {{end}}
+ {{end}}
+
+ mainparse:
+ for {
+ tok = fs.Scan()
+ // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state))
+ if tok == fflib.FFTok_error {
+ goto tokerror
+ }
+
+ switch state {
+
+ case fflib.FFParse_map_start:
+ if tok != fflib.FFTok_left_bracket {
+ wantedTok = fflib.FFTok_left_bracket
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_key
+ continue
+
+ case fflib.FFParse_after_value:
+ if tok == fflib.FFTok_comma {
+ state = fflib.FFParse_want_key
+ } else if tok == fflib.FFTok_right_bracket {
+ goto done
+ } else {
+ wantedTok = fflib.FFTok_comma
+ goto wrongtokenerror
+ }
+
+ case fflib.FFParse_want_key:
+ // json {} ended. goto exit. woo.
+ if tok == fflib.FFTok_right_bracket {
+ goto done
+ }
+ if tok != fflib.FFTok_string {
+ wantedTok = fflib.FFTok_string
+ goto wrongtokenerror
+ }
+
+ kn := fs.Output.Bytes()
+ if len(kn) <= 0 {
+ // "" case. hrm.
+ currentKey = ffjt{{.SI.Name}}nosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ } else {
+ switch kn[0] {
+ {{range $byte, $fields := $si.FieldsByFirstByte}}
+ case '{{$byte}}':
+ {{range $index, $field := $fields}}
+ {{if ne $index 0 }}} else if {{else}}if {{end}} bytes.Equal(ffjKey{{$si.Name}}{{$field.Name}}, kn) {
+ currentKey = ffjt{{$si.Name}}{{$field.Name}}
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ {{end}} }
+ {{end}}
+ }
+ {{range $index, $field := $si.ReverseFields}}
+ if {{$field.FoldFuncName}}(ffjKey{{$si.Name}}{{$field.Name}}, kn) {
+ currentKey = ffjt{{$si.Name}}{{$field.Name}}
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+ {{end}}
+ currentKey = ffjt{{.SI.Name}}nosuchkey
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
+ case fflib.FFParse_want_colon:
+ if tok != fflib.FFTok_colon {
+ wantedTok = fflib.FFTok_colon
+ goto wrongtokenerror
+ }
+ state = fflib.FFParse_want_value
+ continue
+ case fflib.FFParse_want_value:
+
+ if {{range $index, $v := .ValidValues}}{{if ne $index 0 }}||{{end}}tok == fflib.{{$v}}{{end}} {
+ switch currentKey {
+ {{range $index, $field := $si.Fields}}
+ case ffjt{{$si.Name}}{{$field.Name}}:
+ goto handle_{{$field.Name}}
+ {{end}}
+ case ffjt{{$si.Name}}nosuchkey:
+ err = fs.SkipField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ state = fflib.FFParse_after_value
+ goto mainparse
+ }
+ } else {
+ goto wantedvalue
+ }
+ }
+ }
+ {{range $index, $field := $si.Fields}}
+ handle_{{$field.Name}}:
+ {{with $fieldName := $field.Name | printf "j.%s"}}
+ {{handleField $ic $fieldName $field.Typ $field.Pointer $field.ForceString}}
+ {{if eq $.ResetFields true}}
+ ffjSet{{$si.Name}}{{$field.Name}} = true
+ {{end}}
+ state = fflib.FFParse_after_value
+ goto mainparse
+ {{end}}
+ {{end}}
+
+ wantedvalue:
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
+ wrongtokenerror:
+ return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String()))
+ tokerror:
+ if fs.BigError != nil {
+ return fs.WrapErr(fs.BigError)
+ }
+ err = fs.Error.ToError()
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ panic("ffjson-generated: unreachable, please report bug.")
+ done:
+ {{if eq .ResetFields true}}
+ {{range $index, $field := $si.Fields}}
+ if !ffjSet{{$si.Name}}{{$field.Name}} {
+ {{with $fieldName := $field.Name | printf "j.%s"}}
+ {{if eq $field.Pointer true}}
+ {{$fieldName}} = nil
+ {{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Interface), 10) + `}}
+ {{$fieldName}} = nil
+ {{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Slice), 10) + `}}
+ {{$fieldName}} = nil
+ {{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Array), 10) + `}}
+ {{$fieldName}} = [{{$field.Typ.Len}}]{{getType $ic $fieldName $field.Typ.Elem}}{}
+ {{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Map), 10) + `}}
+ {{$fieldName}} = nil
+ {{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Bool), 10) + `}}
+ {{$fieldName}} = false
+ {{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.String), 10) + `}}
+ {{$fieldName}} = ""
+ {{else if eq $field.Typ.Kind ` + strconv.FormatUint(uint64(reflect.Struct), 10) + `}}
+ {{$fieldName}} = {{getType $ic $fieldName $field.Typ}}{}
+ {{else}}
+ {{$fieldName}} = {{getType $ic $fieldName $field.Typ}}(0)
+ {{end}}
+ {{end}}
+ }
+ {{end}}
+ {{end}}
+ return nil
+ }
+ `
+
+ type handleUnmarshaler struct {
+ IC *Inception
+ Name string
+ Typ reflect.Type
+ Ptr reflect.Kind
+ TakeAddr bool
+ UnmarshalJSONFFLexer bool
+ Unmarshaler bool
+ }
+
+ var handleUnmarshalerTxt = `
+ {{$ic := .IC}}
+
+ {{if eq .UnmarshalJSONFFLexer true}}
+ {
+ if tok == fflib.FFTok_null {
+ {{if eq .Typ.Kind .Ptr }}
+ {{.Name}} = nil
+ {{end}}
+ {{if eq .TakeAddr true }}
+ {{.Name}} = nil
+ {{end}}
+ } else {
+ {{if eq .Typ.Kind .Ptr }}
+ if {{.Name}} == nil {
+ {{.Name}} = new({{getType $ic .Typ.Elem.Name .Typ.Elem}})
+ }
+ {{end}}
+ {{if eq .TakeAddr true }}
+ if {{.Name}} == nil {
+ {{.Name}} = new({{getType $ic .Typ.Name .Typ}})
+ }
+ {{end}}
+ err = {{.Name}}.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key)
+ if err != nil {
+ return err
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+ {{else}}
+ {{if eq .Unmarshaler true}}
+ {
+ if tok == fflib.FFTok_null {
+ {{if eq .TakeAddr true }}
+ {{.Name}} = nil
+ {{end}}
+ } else {
+
+ tbuf, err := fs.CaptureField(tok)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+
+ {{if eq .TakeAddr true }}
+ if {{.Name}} == nil {
+ {{.Name}} = new({{getType $ic .Typ.Name .Typ}})
+ }
+ {{end}}
+ err = {{.Name}}.UnmarshalJSON(tbuf)
+ if err != nil {
+ return fs.WrapErr(err)
+ }
+ }
+ state = fflib.FFParse_after_value
+ }
+ {{end}}
+ {{end}}
+ `
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/encoder.go'
Index: ./github.com/pquerna/ffjson/inception/encoder.go
*** ./github.com/pquerna/ffjson/inception/encoder.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/encoder.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,544 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ "fmt"
+ "reflect"
+
+ "github.com/pquerna/ffjson/shared"
+ )
+
+ func typeInInception(ic *Inception, typ reflect.Type, f shared.Feature) bool {
+ for _, v := range ic.objs {
+ if v.Typ == typ {
+ return v.Options.HasFeature(f)
+ }
+ if typ.Kind() == reflect.Ptr {
+ if v.Typ == typ.Elem() {
+ return v.Options.HasFeature(f)
+ }
+ }
+ }
+
+ return false
+ }
+
+ func getOmitEmpty(ic *Inception, sf *StructField) string {
+ ptname := "j." + sf.Name
+ if sf.Pointer {
+ ptname = "*" + ptname
+ return "if true {\n"
+ }
+ switch sf.Typ.Kind() {
+
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return "if len(" + ptname + ") != 0 {" + "\n"
+
+ case reflect.Int,
+ reflect.Int8,
+ reflect.Int16,
+ reflect.Int32,
+ reflect.Int64,
+ reflect.Uint,
+ reflect.Uint8,
+ reflect.Uint16,
+ reflect.Uint32,
+ reflect.Uint64,
+ reflect.Uintptr,
+ reflect.Float32,
+ reflect.Float64:
+ return "if " + ptname + " != 0 {" + "\n"
+
+ case reflect.Bool:
+ return "if " + ptname + " != false {" + "\n"
+
+ case reflect.Interface, reflect.Ptr:
+ return "if " + ptname + " != nil {" + "\n"
+
+ default:
+ // TODO(pquerna): fix types
+ return "if true {" + "\n"
+ }
+ }
+
+ func getMapValue(ic *Inception, name string, typ reflect.Type, ptr bool, forceString bool) string {
+ var out = ""
+
+ if typ.Key().Kind() != reflect.String {
+ out += fmt.Sprintf("/* Falling back. type=%v kind=%v */\n", typ, typ.Kind())
+ out += ic.q.Flush()
+ out += "err = buf.Encode(" + name + ")" + "\n"
+ out += "if err != nil {" + "\n"
+ out += " return err" + "\n"
+ out += "}" + "\n"
+ return out
+ }
+
+ var elemKind reflect.Kind
+ elemKind = typ.Elem().Kind()
+
+ switch elemKind {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32,
+ reflect.Float64,
+ reflect.Bool:
+
+ ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
+
+ out += "if " + name + " == nil {" + "\n"
+ ic.q.Write("null")
+ out += ic.q.GetQueued()
+ ic.q.DeleteLast()
+ out += "} else {" + "\n"
+ out += ic.q.WriteFlush("{ ")
+ out += " for key, value := range " + name + " {" + "\n"
+ out += " fflib.WriteJsonString(buf, key)" + "\n"
+ out += " buf.WriteString(`:`)" + "\n"
+ out += getGetInnerValue(ic, "value", typ.Elem(), false, forceString)
+ out += " buf.WriteByte(',')" + "\n"
+ out += " }" + "\n"
+ out += "buf.Rewind(1)" + "\n"
+ out += ic.q.WriteFlush("}")
+ out += "}" + "\n"
+
+ default:
+ out += ic.q.Flush()
+ out += fmt.Sprintf("/* Falling back. type=%v kind=%v */\n", typ, typ.Kind())
+ out += "err = buf.Encode(" + name + ")" + "\n"
+ out += "if err != nil {" + "\n"
+ out += " return err" + "\n"
+ out += "}" + "\n"
+ }
+ return out
+ }
+
+ func getGetInnerValue(ic *Inception, name string, typ reflect.Type, ptr bool, forceString bool) string {
+ var out = ""
+
+ // Flush if not bool or maps
+ if typ.Kind() != reflect.Bool && typ.Kind() != reflect.Map && typ.Kind() != reflect.Struct {
+ out += ic.q.Flush()
+ }
+
+ if typ.Implements(marshalerFasterType) ||
+ reflect.PtrTo(typ).Implements(marshalerFasterType) ||
+ typeInInception(ic, typ, shared.MustEncoder) ||
+ typ.Implements(marshalerType) ||
+ reflect.PtrTo(typ).Implements(marshalerType) {
+
+ out += ic.q.Flush()
+ out += tplStr(encodeTpl["handleMarshaler"], handleMarshaler{
+ IC: ic,
+ Name: name,
+ Typ: typ,
+ Ptr: reflect.Ptr,
+ MarshalJSONBuf: typ.Implements(marshalerFasterType) || reflect.PtrTo(typ).Implements(marshalerFasterType) || typeInInception(ic, typ, shared.MustEncoder),
+ Marshaler: typ.Implements(marshalerType) || reflect.PtrTo(typ).Implements(marshalerType),
+ })
+ return out
+ }
+
+ ptname := name
+ if ptr {
+ ptname = "*" + name
+ }
+
+ switch typ.Kind() {
+ case reflect.Int,
+ reflect.Int8,
+ reflect.Int16,
+ reflect.Int32,
+ reflect.Int64:
+ ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
+ out += "fflib.FormatBits2(buf, uint64(" + ptname + "), 10, " + ptname + " < 0)" + "\n"
+ case reflect.Uint,
+ reflect.Uint8,
+ reflect.Uint16,
+ reflect.Uint32,
+ reflect.Uint64,
+ reflect.Uintptr:
+ ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
+ out += "fflib.FormatBits2(buf, uint64(" + ptname + "), 10, false)" + "\n"
+ case reflect.Float32:
+ ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
+ out += "fflib.AppendFloat(buf, float64(" + ptname + "), 'g', -1, 32)" + "\n"
+ case reflect.Float64:
+ ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
+ out += "fflib.AppendFloat(buf, float64(" + ptname + "), 'g', -1, 64)" + "\n"
+ case reflect.Array,
+ reflect.Slice:
+
+ // Arrays cannot be nil
+ if typ.Kind() != reflect.Array {
+ out += "if " + name + "!= nil {" + "\n"
+ }
+ // Array and slice values encode as JSON arrays, except that
+ // []byte encodes as a base64-encoded string, and a nil slice
+ // encodes as the null JSON object.
+ if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
+ ic.OutputImports[`"encoding/base64"`] = true
+
+ out += "buf.WriteString(`\"`)" + "\n"
+ out += `{` + "\n"
+ out += `enc := base64.NewEncoder(base64.StdEncoding, buf)` + "\n"
+ if typ.Elem().Name() != "byte" {
+ ic.OutputImports[`"reflect"`] = true
+ out += `enc.Write(reflect.Indirect(reflect.ValueOf(` + ptname + `)).Bytes())` + "\n"
+
+ } else {
+ out += `enc.Write(` + ptname + `)` + "\n"
+ }
+ out += `enc.Close()` + "\n"
+ out += `}` + "\n"
+ out += "buf.WriteString(`\"`)" + "\n"
+ } else {
+ out += "buf.WriteString(`[`)" + "\n"
+ out += "for i, v := range " + ptname + "{" + "\n"
+ out += "if i != 0 {" + "\n"
+ out += "buf.WriteString(`,`)" + "\n"
+ out += "}" + "\n"
+ out += getGetInnerValue(ic, "v", typ.Elem(), false, false)
+ out += "}" + "\n"
+ out += "buf.WriteString(`]`)" + "\n"
+ }
+ if typ.Kind() != reflect.Array {
+ out += "} else {" + "\n"
+ out += "buf.WriteString(`null`)" + "\n"
+ out += "}" + "\n"
+ }
+ case reflect.String:
+ // Is it a json.Number?
+ if typ.PkgPath() == "encoding/json" && typ.Name() == "Number" {
+ // Fall back to json package to rely on the valid number check.
+ // See: https://github.com/golang/go/blob/92cd6e3af9f423ab4d8ac78f24e7fd81c31a8ce6/src/encoding/json/encode.go#L550
+ out += fmt.Sprintf("/* json.Number */\n")
+ out += "err = buf.Encode(" + name + ")" + "\n"
+ out += "if err != nil {" + "\n"
+ out += " return err" + "\n"
+ out += "}" + "\n"
+ } else {
+ ic.OutputImports[`fflib "github.com/pquerna/ffjson/fflib/v1"`] = true
+ if forceString {
+ // Forcestring on strings does double-escaping of the entire value.
+ // We create a temporary buffer, encode to that and re-encode it.
+ out += "{" + "\n"
+ out += "tmpbuf := fflib.Buffer{}" + "\n"
+ out += "tmpbuf.Grow(len(" + ptname + ") + 16)" + "\n"
+ out += "fflib.WriteJsonString(&tmpbuf, string(" + ptname + "))" + "\n"
+ out += "fflib.WriteJsonString(buf, string( tmpbuf.Bytes() " + `))` + "\n"
+ out += "}" + "\n"
+ } else {
+ out += "fflib.WriteJsonString(buf, string(" + ptname + "))" + "\n"
+ }
+ }
+ case reflect.Ptr:
+ out += "if " + name + "!= nil {" + "\n"
+ switch typ.Elem().Kind() {
+ case reflect.Struct:
+ out += getGetInnerValue(ic, name, typ.Elem(), false, false)
+ default:
+ out += getGetInnerValue(ic, "*"+name, typ.Elem(), false, false)
+ }
+ out += "} else {" + "\n"
+ out += "buf.WriteString(`null`)" + "\n"
+ out += "}" + "\n"
+ case reflect.Bool:
+ out += "if " + ptname + " {" + "\n"
+ ic.q.Write("true")
+ out += ic.q.GetQueued()
+ out += "} else {" + "\n"
+ // Delete 'true'
+ ic.q.DeleteLast()
+ out += ic.q.WriteFlush("false")
+ out += "}" + "\n"
+ case reflect.Interface:
+ out += fmt.Sprintf("/* Interface types must use runtime reflection. type=%v kind=%v */\n", typ, typ.Kind())
+ out += "err = buf.Encode(" + name + ")" + "\n"
+ out += "if err != nil {" + "\n"
+ out += " return err" + "\n"
+ out += "}" + "\n"
+ case reflect.Map:
+ out += getMapValue(ic, ptname, typ, ptr, forceString)
+ case reflect.Struct:
+ if typ.Name() == "" {
+ ic.q.Write("{")
+ ic.q.Write(" ")
+ out += fmt.Sprintf("/* Inline struct. type=%v kind=%v */\n", typ, typ.Kind())
+ newV := reflect.Indirect(reflect.New(typ)).Interface()
+ fields := extractFields(newV)
+
+ // Output all fields
+ for _, field := range fields {
+ // Adjust field name
+ field.Name = name + "." + field.Name
+ out += getField(ic, field, "")
+ }
+
+ if lastConditional(fields) {
+ out += ic.q.Flush()
+ out += `buf.Rewind(1)` + "\n"
+ } else {
+ ic.q.DeleteLast()
+ }
+ out += ic.q.WriteFlush("}")
+ } else {
+ out += fmt.Sprintf("/* Struct fall back. type=%v kind=%v */\n", typ, typ.Kind())
+ out += ic.q.Flush()
+ if ptr {
+ out += "err = buf.Encode(" + name + ")" + "\n"
+ } else {
+ // We send pointer to avoid copying entire struct
+ out += "err = buf.Encode(&" + name + ")" + "\n"
+ }
+ out += "if err != nil {" + "\n"
+ out += " return err" + "\n"
+ out += "}" + "\n"
+ }
+ default:
+ out += fmt.Sprintf("/* Falling back. type=%v kind=%v */\n", typ, typ.Kind())
+ out += "err = buf.Encode(" + name + ")" + "\n"
+ out += "if err != nil {" + "\n"
+ out += " return err" + "\n"
+ out += "}" + "\n"
+ }
+
+ return out
+ }
+
+ func getValue(ic *Inception, sf *StructField, prefix string) string {
+ closequote := false
+ if sf.ForceString {
+ switch sf.Typ.Kind() {
+ case reflect.Int,
+ reflect.Int8,
+ reflect.Int16,
+ reflect.Int32,
+ reflect.Int64,
+ reflect.Uint,
+ reflect.Uint8,
+ reflect.Uint16,
+ reflect.Uint32,
+ reflect.Uint64,
+ reflect.Uintptr,
+ reflect.Float32,
+ reflect.Float64,
+ reflect.Bool:
+ ic.q.Write(`"`)
+ closequote = true
+ }
+ }
+ out := getGetInnerValue(ic, prefix+sf.Name, sf.Typ, sf.Pointer, sf.ForceString)
+ if closequote {
+ if sf.Pointer {
+ out += ic.q.WriteFlush(`"`)
+ } else {
+ ic.q.Write(`"`)
+ }
+ }
+
+ return out
+ }
+
+ func p2(v uint32) uint32 {
+ v--
+ v |= v >> 1
+ v |= v >> 2
+ v |= v >> 4
+ v |= v >> 8
+ v |= v >> 16
+ v++
+ return v
+ }
+
+ func getTypeSize(t reflect.Type) uint32 {
+ switch t.Kind() {
+ case reflect.String:
+ // TODO: consider runtime analysis.
+ return 32
+ case reflect.Array, reflect.Map, reflect.Slice:
+ // TODO: consider runtime analysis.
+ return 4 * getTypeSize(t.Elem())
+ case reflect.Int,
+ reflect.Int8,
+ reflect.Int16,
+ reflect.Int32,
+ reflect.Uint,
+ reflect.Uint8,
+ reflect.Uint16,
+ reflect.Uint32:
+ return 8
+ case reflect.Int64,
+ reflect.Uint64,
+ reflect.Uintptr:
+ return 16
+ case reflect.Float32,
+ reflect.Float64:
+ return 16
+ case reflect.Bool:
+ return 4
+ case reflect.Ptr:
+ return getTypeSize(t.Elem())
+ default:
+ return 16
+ }
+ }
+
+ func getTotalSize(si *StructInfo) uint32 {
+ rv := uint32(si.Typ.Size())
+ for _, f := range si.Fields {
+ rv += getTypeSize(f.Typ)
+ }
+ return rv
+ }
+
+ func getBufGrowSize(si *StructInfo) uint32 {
+
+ // TODO(pquerna): automatically calculate a better grow size based on
+ // the history of a struct.
+ return p2(getTotalSize(si))
+ }
+
+ func isIntish(t reflect.Type) bool {
+ if t.Kind() >= reflect.Int && t.Kind() <= reflect.Uintptr {
+ return true
+ }
+ if t.Kind() == reflect.Array || t.Kind() == reflect.Slice || t.Kind() == reflect.Ptr {
+ if t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
+ // base64 special case.
+ return false
+ } else {
+ return isIntish(t.Elem())
+ }
+ }
+ return false
+ }
+
+ func getField(ic *Inception, f *StructField, prefix string) string {
+ out := ""
+ if f.OmitEmpty {
+ out += ic.q.Flush()
+ if f.Pointer {
+ out += "if " + prefix + f.Name + " != nil {" + "\n"
+ }
+ out += getOmitEmpty(ic, f)
+ }
+
+ if f.Pointer && !f.OmitEmpty {
+ // Pointer values encode as the value pointed to. A nil pointer encodes as the null JSON value.
+ out += "if " + prefix + f.Name + " != nil {" + "\n"
+ }
+
+ // JsonName is already escaped and quoted.
+ // getGetInnerValue should flush the queued writes
+ ic.q.Write(f.JsonName + ":")
+ // Save a copy of the write queue in case we need to emit null instead
+ t := ic.q
+
+ out += getValue(ic, f, prefix)
+ ic.q.Write(",")
+
+ if f.Pointer && !f.OmitEmpty {
+ out += "} else {" + "\n"
+ out += t.WriteFlush("null")
+ out += "}" + "\n"
+ }
+
+ if f.OmitEmpty {
+ out += ic.q.Flush()
+ if f.Pointer {
+ out += "}" + "\n"
+ }
+ out += "}" + "\n"
+ }
+ return out
+ }
+
+ // lastConditional reports whether the last field is conditional (omitempty).
+ func lastConditional(fields []*StructField) bool {
+ if len(fields) > 0 {
+ f := fields[len(fields)-1]
+ return f.OmitEmpty
+ }
+ return false
+ }
+
+ func CreateMarshalJSON(ic *Inception, si *StructInfo) error {
+ conditionalWrites := lastConditional(si.Fields)
+ out := ""
+
+ out += "// MarshalJSON marshal bytes to json - template\n"
+ out += `func (j *` + si.Name + `) MarshalJSON() ([]byte, error) {` + "\n"
+ out += `var buf fflib.Buffer` + "\n"
+
+ out += `if j == nil {` + "\n"
+ out += ` buf.WriteString("null")` + "\n"
+ out += " return buf.Bytes(), nil" + "\n"
+ out += `}` + "\n"
+
+ out += `err := j.MarshalJSONBuf(&buf)` + "\n"
+ out += `if err != nil {` + "\n"
+ out += " return nil, err" + "\n"
+ out += `}` + "\n"
+ out += `return buf.Bytes(), nil` + "\n"
+ out += `}` + "\n"
+
+ out += "// MarshalJSONBuf marshal buff to json - template\n"
+ out += `func (j *` + si.Name + `) MarshalJSONBuf(buf fflib.EncodingBuffer) (error) {` + "\n"
+ out += ` if j == nil {` + "\n"
+ out += ` buf.WriteString("null")` + "\n"
+ out += " return nil" + "\n"
+ out += ` }` + "\n"
+
+ out += `var err error` + "\n"
+ out += `var obj []byte` + "\n"
+ out += `_ = obj` + "\n"
+ out += `_ = err` + "\n"
+
+ ic.q.Write("{")
+
+ // The extra space is inserted here.
+ // If nothing is written to the fields, this space will be deleted
+ // instead of the last comma.
+ if conditionalWrites || len(si.Fields) == 0 {
+ ic.q.Write(" ")
+ }
+
+ for _, f := range si.Fields {
+ out += getField(ic, f, "j.")
+ }
+
+ // Handling the last comma is tricky.
+ // If the last field has omitempty, conditionalWrites is set.
+ // If something has been written, rewinding the buffer deletes the
+ // last comma; otherwise it deletes the padding space.
+ if conditionalWrites {
+ out += ic.q.Flush()
+ out += `buf.Rewind(1)` + "\n"
+ } else {
+ ic.q.DeleteLast()
+ }
+
+ out += ic.q.WriteFlush("}")
+ out += `return nil` + "\n"
+ out += `}` + "\n"
+ ic.OutputFuncs = append(ic.OutputFuncs, out)
+ return nil
+ }
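#
# Editor's note: getBufGrowSize above pre-sizes the output buffer to the
# next power of two above a rough static size estimate. A minimal
# standalone sketch of the p2 bit trick (not part of the patch; the values
# in the comment are for illustration):
#
#     package main
#
#     import "fmt"
#
#     // p2 smears the highest set bit of v-1 into every lower bit,
#     // then adds one, yielding the smallest power of two >= v.
#     func p2(v uint32) uint32 {
#         v--
#         v |= v >> 1
#         v |= v >> 2
#         v |= v >> 4
#         v |= v >> 8
#         v |= v >> 16
#         v++
#         return v
#     }
#
#     func main() {
#         fmt.Println(p2(100), p2(128), p2(129)) // 128 128 256
#     }
#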
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/encoder_tpl.go'
Index: ./github.com/pquerna/ffjson/inception/encoder_tpl.go
*** ./github.com/pquerna/ffjson/inception/encoder_tpl.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/encoder_tpl.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,73 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ "reflect"
+ "text/template"
+ )
+
+ var encodeTpl map[string]*template.Template
+
+ func init() {
+ encodeTpl = make(map[string]*template.Template)
+
+ funcs := map[string]string{
+ "handleMarshaler": handleMarshalerTxt,
+ }
+ tplFuncs := template.FuncMap{}
+
+ for k, v := range funcs {
+ encodeTpl[k] = template.Must(template.New(k).Funcs(tplFuncs).Parse(v))
+ }
+ }
+
+ type handleMarshaler struct {
+ IC *Inception
+ Name string
+ Typ reflect.Type
+ Ptr reflect.Kind
+ MarshalJSONBuf bool
+ Marshaler bool
+ }
+
+ var handleMarshalerTxt = `
+ {
+ {{if eq .Typ.Kind .Ptr}}
+ if {{.Name}} == nil {
+ buf.WriteString("null")
+ } else {
+ {{end}}
+
+ {{if eq .MarshalJSONBuf true}}
+ err = {{.Name}}.MarshalJSONBuf(buf)
+ if err != nil {
+ return err
+ }
+ {{else if eq .Marshaler true}}
+ obj, err = {{.Name}}.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ buf.Write(obj)
+ {{end}}
+ {{if eq .Typ.Kind .Ptr}}
+ }
+ {{end}}
+ }
+ `
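#
# Editor's note: a hypothetical rendering (not part of the patch) of the
# handleMarshaler template; tplStr is the helper defined in
# inception/template.go below, and the field values here are invented:
#
#     out := tplStr(encodeTpl["handleMarshaler"], handleMarshaler{
#         Name:           "j.Payload",                      // assumed field
#         Typ:            reflect.TypeOf((*struct{})(nil)), // a pointer type
#         Ptr:            reflect.Ptr,
#         MarshalJSONBuf: true,
#     })
#     // out now holds a nil-checked `err = j.Payload.MarshalJSONBuf(buf)`
#     // snippet, because Typ.Kind() == .Ptr selects the null branch.
#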
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/inception.go'
Index: ./github.com/pquerna/ffjson/inception/inception.go
*** ./github.com/pquerna/ffjson/inception/inception.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/inception.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,160 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ "errors"
+ "fmt"
+ "github.com/pquerna/ffjson/shared"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "sort"
+ )
+
+ type Inception struct {
+ objs []*StructInfo
+ InputPath string
+ OutputPath string
+ PackageName string
+ PackagePath string
+ OutputImports map[string]bool
+ OutputFuncs []string
+ q ConditionalWrite
+ ResetFields bool
+ }
+
+ func NewInception(inputPath string, packageName string, outputPath string, resetFields bool) *Inception {
+ return &Inception{
+ objs: make([]*StructInfo, 0),
+ InputPath: inputPath,
+ OutputPath: outputPath,
+ PackageName: packageName,
+ OutputFuncs: make([]string, 0),
+ OutputImports: make(map[string]bool),
+ ResetFields: resetFields,
+ }
+ }
+
+ func (i *Inception) AddMany(objs []shared.InceptionType) {
+ for _, obj := range objs {
+ i.Add(obj)
+ }
+ }
+
+ func (i *Inception) Add(obj shared.InceptionType) {
+ i.objs = append(i.objs, NewStructInfo(obj))
+ i.PackagePath = i.objs[0].Typ.PkgPath()
+ }
+
+ func (i *Inception) wantUnmarshal(si *StructInfo) bool {
+ if si.Options.SkipDecoder {
+ return false
+ }
+ typ := si.Typ
+ umlx := typ.Implements(unmarshalFasterType) || reflect.PtrTo(typ).Implements(unmarshalFasterType)
+ umlstd := typ.Implements(unmarshalerType) || reflect.PtrTo(typ).Implements(unmarshalerType)
+ if umlstd && !umlx {
+ // structure has UnmarshalJSON, but not our faster version -- skip it.
+ return false
+ }
+ return true
+ }
+
+ func (i *Inception) wantMarshal(si *StructInfo) bool {
+ if si.Options.SkipEncoder {
+ return false
+ }
+ typ := si.Typ
+ mlx := typ.Implements(marshalerFasterType) || reflect.PtrTo(typ).Implements(marshalerFasterType)
+ mlstd := typ.Implements(marshalerType) || reflect.PtrTo(typ).Implements(marshalerType)
+ if mlstd && !mlx {
+ // structure has MarshalJSON, but not our faster version -- skip it.
+ return false
+ }
+ return true
+ }
+
+ type sortedStructs []*StructInfo
+
+ func (p sortedStructs) Len() int { return len(p) }
+ func (p sortedStructs) Less(i, j int) bool { return p[i].Name < p[j].Name }
+ func (p sortedStructs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+ func (p sortedStructs) Sort() { sort.Sort(p) }
+
+ func (i *Inception) generateCode() error {
+ // We sort the structs by name, so output is predictable.
+ sorted := sortedStructs(i.objs)
+ sorted.Sort()
+
+ for _, si := range sorted {
+ if i.wantMarshal(si) {
+ err := CreateMarshalJSON(i, si)
+ if err != nil {
+ return err
+ }
+ }
+
+ if i.wantUnmarshal(si) {
+ err := CreateUnmarshalJSON(i, si)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+
+ func (i *Inception) handleError(err error) {
+ fmt.Fprintf(os.Stderr, "Error: %s:\n\n", err)
+ os.Exit(1)
+ }
+
+ func (i *Inception) Execute() {
+ if len(os.Args) != 1 {
+ i.handleError(fmt.Errorf("Internal ffjson error: inception executable takes no args: %v", os.Args))
+ return
+ }
+
+ err := i.generateCode()
+ if err != nil {
+ i.handleError(err)
+ return
+ }
+
+ data, err := RenderTemplate(i)
+ if err != nil {
+ i.handleError(err)
+ return
+ }
+
+ stat, err := os.Stat(i.InputPath)
+
+ if err != nil {
+ i.handleError(err)
+ return
+ }
+
+ err = ioutil.WriteFile(i.OutputPath, data, stat.Mode())
+
+ if err != nil {
+ i.handleError(err)
+ return
+ }
+
+ }
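#
# Editor's note: Execute is normally called from a generated "inception"
# program. A hypothetical, minimal driver (not part of the patch; the file
# names and the Record type are invented, and record.go must exist for the
# os.Stat call to succeed):
#
#     package main
#
#     import (
#         ffjsoninception "github.com/pquerna/ffjson/inception"
#         "github.com/pquerna/ffjson/shared"
#     )
#
#     type Record struct {
#         Name string `json:"name"`
#     }
#
#     func main() {
#         i := ffjsoninception.NewInception(
#             "record.go", "main", "record_ffjson.go", false)
#         i.AddMany([]shared.InceptionType{{Obj: Record{}}})
#         i.Execute() // writes record_ffjson.go with the generated methods
#     }
#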
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/reflect.go'
Index: ./github.com/pquerna/ffjson/inception/reflect.go
*** ./github.com/pquerna/ffjson/inception/reflect.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/reflect.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,290 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ fflib "github.com/pquerna/ffjson/fflib/v1"
+ "github.com/pquerna/ffjson/shared"
+
+ "bytes"
+ "encoding/json"
+ "reflect"
+ "unicode/utf8"
+ )
+
+ type StructField struct {
+ Name string
+ JsonName string
+ FoldFuncName string
+ Typ reflect.Type
+ OmitEmpty bool
+ ForceString bool
+ HasMarshalJSON bool
+ HasUnmarshalJSON bool
+ Pointer bool
+ Tagged bool
+ }
+
+ type FieldByJsonName []*StructField
+
+ func (a FieldByJsonName) Len() int { return len(a) }
+ func (a FieldByJsonName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+ func (a FieldByJsonName) Less(i, j int) bool { return a[i].JsonName < a[j].JsonName }
+
+ type StructInfo struct {
+ Name string
+ Obj interface{}
+ Typ reflect.Type
+ Fields []*StructField
+ Options shared.StructOptions
+ }
+
+ func NewStructInfo(obj shared.InceptionType) *StructInfo {
+ t := reflect.TypeOf(obj.Obj)
+ return &StructInfo{
+ Obj: obj.Obj,
+ Name: t.Name(),
+ Typ: t,
+ Fields: extractFields(obj.Obj),
+ Options: obj.Options,
+ }
+ }
+
+ func (si *StructInfo) FieldsByFirstByte() map[string][]*StructField {
+ rv := make(map[string][]*StructField)
+ for _, f := range si.Fields {
+ // JsonName is quoted, so the first byte of the name is at index 1.
+ b := string(f.JsonName[1])
+ rv[b] = append(rv[b], f)
+ }
+ return rv
+ }
+
+ func (si *StructInfo) ReverseFields() []*StructField {
+ var i int
+ rv := make([]*StructField, 0)
+ for i = len(si.Fields) - 1; i >= 0; i-- {
+ rv = append(rv, si.Fields[i])
+ }
+ return rv
+ }
+
+ const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ )
+
+ func foldFunc(key []byte) string {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range key {
+ if b >= utf8.RuneSelf {
+ return "bytes.EqualFold"
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // 'K' and 'S' are special because they case-fold to/from
+ // non-ASCII runes (Kelvin sign U+212A and long s U+017F).
+ special = true
+ }
+ }
+ if special {
+ return "fflib.EqualFoldRight"
+ }
+ if nonLetter {
+ return "fflib.AsciiEqualFold"
+ }
+ return "fflib.SimpleLetterEqualFold"
+ }
+
+ type MarshalerFaster interface {
+ MarshalJSONBuf(buf fflib.EncodingBuffer) error
+ }
+
+ type UnmarshalFaster interface {
+ UnmarshalJSONFFLexer(l *fflib.FFLexer, state fflib.FFParseState) error
+ }
+
+ var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem()
+ var marshalerFasterType = reflect.TypeOf(new(MarshalerFaster)).Elem()
+ var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem()
+ var unmarshalFasterType = reflect.TypeOf(new(UnmarshalFaster)).Elem()
+
+ // extractFields returns a list of fields that JSON should recognize for the given type.
+ // The algorithm is breadth-first search over the set of structs to include - the top struct
+ // and then any reachable anonymous structs.
+ func extractFields(obj interface{}) []*StructField {
+ t := reflect.TypeOf(obj)
+ // Anonymous fields to explore at the current level and the next.
+ current := []StructField{}
+ next := []StructField{{Typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []*StructField
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.Typ] {
+ continue
+ }
+ visited[f.Typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.Typ.NumField(); i++ {
+ sf := f.Typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+
+ ft := sf.Type
+ ptr := false
+ if ft.Kind() == reflect.Ptr {
+ ptr = true
+ }
+
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+
+ var buf bytes.Buffer
+ fflib.WriteJsonString(&buf, name)
+
+ field := &StructField{
+ Name: sf.Name,
+ JsonName: string(buf.Bytes()),
+ FoldFuncName: foldFunc([]byte(name)),
+ Typ: ft,
+ HasMarshalJSON: ft.Implements(marshalerType),
+ HasUnmarshalJSON: ft.Implements(unmarshalerType),
+ OmitEmpty: opts.Contains("omitempty"),
+ ForceString: opts.Contains("string"),
+ Pointer: ptr,
+ Tagged: tagged,
+ }
+
+ fields = append(fields, field)
+
+ if count[f.Typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, StructField{
+ Name: ft.Name(),
+ Typ: ft,
+ })
+ }
+ }
+ }
+ }
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.JsonName
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.JsonName != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+
+ return fields
+ }
+
+ // dominantField looks through the fields, all of which are known to
+ // have the same name, to find the single field that dominates the
+ // others using Go's embedding rules, modified by the presence of
+ // JSON tags. If there are multiple top-level fields, the boolean
+ // will be false: This condition is an error in Go and we skip all
+ // the fields.
+ func dominantField(fields []*StructField) (*StructField, bool) {
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if f.Tagged {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return nil, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return nil, false
+ }
+ return fields[0], true
+ }
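#
# Editor's note: dominantField mirrors encoding/json's conflict rule in
# simplified form: same-named, untagged fields at the same embedding depth
# annihilate each other. A standalone sketch against the stdlib (not part
# of the patch) showing the behavior being reproduced:
#
#     package main
#
#     import (
#         "encoding/json"
#         "fmt"
#     )
#
#     type A struct{ X int }
#     type B struct{ X int }
#     type Outer struct {
#         A
#         B
#     }
#
#     func main() {
#         b, _ := json.Marshal(Outer{A{1}, B{2}})
#         fmt.Println(string(b)) // {}   (the ambiguous X is dropped)
#     }
#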
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/tags.go'
Index: ./github.com/pquerna/ffjson/inception/tags.go
*** ./github.com/pquerna/ffjson/inception/tags.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/tags.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,79 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ "strings"
+ "unicode"
+ )
+
+ // from: http://golang.org/src/pkg/encoding/json/tags.go
+
+ // tagOptions is the string following a comma in a struct field's "json"
+ // tag, or the empty string. It does not include the leading comma.
+ type tagOptions string
+
+ // parseTag splits a struct field's json tag into its name and
+ // comma-separated options.
+ func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+ }
+
+ // Contains reports whether a comma-separated list of options
+ // contains a particular substr flag. substr must be surrounded by a
+ // string boundary or commas.
+ func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+ }
+
+ func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+ }
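#
# Editor's note: a hypothetical in-package test (not part of the patch)
# pinning down parseTag and tagOptions.Contains semantics; Contains matches
# whole comma-separated tokens only:
#
#     package ffjsoninception
#
#     import "testing"
#
#     func TestParseTagSketch(t *testing.T) {
#         name, opts := parseTag("user_id,omitempty,string")
#         if name != "user_id" {
#             t.Fatalf("name = %q", name)
#         }
#         if !opts.Contains("omitempty") || !opts.Contains("string") {
#             t.Fatal("expected both options to be present")
#         }
#         if opts.Contains("omit") { // prefixes must not match
#             t.Fatal("partial option names must not match")
#         }
#     }
#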
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/template.go'
Index: ./github.com/pquerna/ffjson/inception/template.go
*** ./github.com/pquerna/ffjson/inception/template.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/template.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,60 ----
+ /**
+ * Copyright 2014 Paul Querna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package ffjsoninception
+
+ import (
+ "bytes"
+ "go/format"
+ "text/template"
+ )
+
+ const ffjsonTemplate = `
+ // Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
+ // source: {{.InputPath}}
+
+ package {{.PackageName}}
+
+ import (
+ {{range $k, $v := .OutputImports}}{{$k}}
+ {{end}}
+ )
+
+ {{range .OutputFuncs}}
+ {{.}}
+ {{end}}
+
+ `
+
+ func RenderTemplate(ic *Inception) ([]byte, error) {
+ t := template.Must(template.New("ffjson.go").Parse(ffjsonTemplate))
+ buf := new(bytes.Buffer)
+ err := t.Execute(buf, ic)
+ if err != nil {
+ return nil, err
+ }
+ return format.Source(buf.Bytes())
+ }
+
+ func tplStr(t *template.Template, data interface{}) string {
+ buf := bytes.Buffer{}
+ err := t.Execute(&buf, data)
+ if err != nil {
+ panic(err)
+ }
+ return buf.String()
+ }
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/inception/writerstack.go'
Index: ./github.com/pquerna/ffjson/inception/writerstack.go
*** ./github.com/pquerna/ffjson/inception/writerstack.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/inception/writerstack.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,65 ----
+ package ffjsoninception
+
+ import "strings"
+
+ // ConditionalWrite is a stack containing a number of pending writes
+ type ConditionalWrite struct {
+ Queued []string
+ }
+
+ // Write will add a string to be written
+ func (w *ConditionalWrite) Write(s string) {
+ w.Queued = append(w.Queued, s)
+ }
+
+ // DeleteLast will delete the last added write
+ func (w *ConditionalWrite) DeleteLast() {
+ if len(w.Queued) == 0 {
+ return
+ }
+ w.Queued = w.Queued[:len(w.Queued)-1]
+ }
+
+ // Last will return the last added write
+ func (w *ConditionalWrite) Last() string {
+ if len(w.Queued) == 0 {
+ return ""
+ }
+ return w.Queued[len(w.Queued)-1]
+ }
+
+ // Flush will return all queued writes, and return
+ // "" (empty string) in nothing has been queued
+ // "buf.WriteByte('" + byte + "')" + '\n' if one bute has been queued.
+ // "buf.WriteString(`" + string + "`)" + "\n" if more than one byte has been queued.
+ func (w *ConditionalWrite) Flush() string {
+ combined := strings.Join(w.Queued, "")
+ if len(combined) == 0 {
+ return ""
+ }
+
+ w.Queued = nil
+ if len(combined) == 1 {
+ return "buf.WriteByte('" + combined + "')" + "\n"
+ }
+ return "buf.WriteString(`" + combined + "`)" + "\n"
+ }
+
+ // FlushTo appends any queued writes to out and returns the result.
+ func (w *ConditionalWrite) FlushTo(out string) string {
+ out += w.Flush()
+ return out
+ }
+
+ // WriteFlush will add a string and return the Flush result for the queue
+ func (w *ConditionalWrite) WriteFlush(s string) string {
+ w.Write(s)
+ return w.Flush()
+ }
+
+ // GetQueued will return the current queued content without flushing.
+ func (w *ConditionalWrite) GetQueued() string {
+ t := w.Queued
+ s := w.Flush()
+ w.Queued = t
+ return s
+ }
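#
# Editor's note: a hypothetical example test (not part of the patch)
# showing how the generator coalesces queued literals into a single
# emitted write, and how DeleteLast retracts a pending comma before
# anything has been emitted:
#
#     package ffjsoninception
#
#     import "fmt"
#
#     func ExampleConditionalWrite() {
#         var q ConditionalWrite
#         q.Write("{")
#         q.Write(`"a":`)
#         fmt.Print(q.Flush())
#         q.Write(",")
#         q.DeleteLast() // retract the comma; it was never emitted
#         fmt.Print(q.WriteFlush("}"))
#         // Output:
#         // buf.WriteString(`{"a":`)
#         // buf.WriteByte('}')
#     }
#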
diff -c /dev/null '_vendor/github.com/pquerna/ffjson/shared/options.go'
Index: ./github.com/pquerna/ffjson/shared/options.go
*** ./github.com/pquerna/ffjson/shared/options.go Thu Jan 1 03:00:00 1970
--- ./github.com/pquerna/ffjson/shared/options.go Thu Oct 26 14:17:41 2023
***************
*** 0 ****
--- 1,51 ----
+ /**
+ * Copyright 2014 Paul Querna, Klaus Post
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+ package shared
+
+ type StructOptions struct {
+ SkipDecoder bool
+ SkipEncoder bool
+ }
+
+ type InceptionType struct {
+ Obj interface{}
+ Options StructOptions
+ }
+ type Feature int
+
+ const (
+ Nothing Feature = 0
+ MustDecoder = 1 << 1
+ MustEncoder = 1 << 2
+ MustEncDec = MustDecoder | MustEncoder
+ )
+
+ func (i InceptionType) HasFeature(f Feature) bool {
+ return i.Options.HasFeature(f)
+ }
+
+ func (s StructOptions) HasFeature(f Feature) bool {
+ hasNeeded := true
+ if f&MustDecoder != 0 && s.SkipDecoder {
+ hasNeeded = false
+ }
+ if f&MustEncoder != 0 && s.SkipEncoder {
+ hasNeeded = false
+ }
+ return hasNeeded
+ }
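#
# Editor's note: a hypothetical test (not part of the patch) pinning down
# StructOptions.HasFeature: skipping the decoder must not hide the encoder.
#
#     package shared
#
#     import "testing"
#
#     func TestHasFeatureSketch(t *testing.T) {
#         opts := StructOptions{SkipDecoder: true}
#         if opts.HasFeature(MustDecoder) {
#             t.Fatal("decoder should be reported unavailable")
#         }
#         if !opts.HasFeature(MustEncoder) {
#             t.Fatal("encoder should still be available")
#         }
#     }
#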
diff -c 'vendor/github.com/prometheus/client_golang/prometheus/go_collector.go' '_vendor/github.com/prometheus/client_golang/prometheus/go_collector.go'
Index: ./github.com/prometheus/client_golang/prometheus/go_collector.go
*** ./github.com/prometheus/client_golang/prometheus/go_collector.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/client_golang/prometheus/go_collector.go Thu Oct 26 15:21:07 2023
***************
*** 15,36 ****
import (
"runtime"
- "runtime/debug"
- "time"
)
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
nil, nil,
--- 15,26 ----
***************
*** 47,60 ****
valType: GaugeValue,
}, {
desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
nil, nil,
--- 37,42 ----
***************
*** 71,84 ****
valType: CounterValue,
}, {
desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
nil, nil,
--- 53,58 ----
***************
*** 111,202 ****
valType: GaugeValue,
}, {
desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
},
}
}
--- 85,96 ----
***************
*** 245,266 ****
// Collect returns the current state of all metrics of the collector.
func (c *baseGoCollector) Collect(ch chan<- Metric) {
! ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
! n, _ := runtime.ThreadCreateProfile(nil)
! ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
!
! var stats debug.GCStats
! stats.PauseQuantiles = make([]time.Duration, 5)
! debug.ReadGCStats(&stats)
!
! quantiles := make(map[float64]float64)
! for idx, pq := range stats.PauseQuantiles[1:] {
! quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
! }
! quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
! ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
! ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
! ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
}
func memstatNamespace(s string) string {
--- 139,145 ----
// Collect returns the current state of all metrics of the collector.
func (c *baseGoCollector) Collect(ch chan<- Metric) {
!
}
func memstatNamespace(s string) string {
diff -c 'vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go' '_vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go'
Index: ./github.com/prometheus/client_golang/prometheus/go_collector_latest.go
*** ./github.com/prometheus/client_golang/prometheus/go_collector_latest.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/client_golang/prometheus/go_collector_latest.go Thu Oct 26 15:21:07 2023
***************
*** 20,26 ****
"math"
"runtime"
"runtime/metrics"
- "strings"
"sync"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
--- 20,25 ----
***************
*** 78,90 ****
func bestEffortLookupRM(lookup []string) []metrics.Description {
ret := make([]metrics.Description, 0, len(lookup))
- for _, rm := range metrics.All() {
- for _, m := range lookup {
- if m == rm.Name {
- ret = append(ret, rm)
- }
- }
- }
return ret
}
--- 77,82 ----
***************
*** 156,183 ****
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
- for _, d := range descriptions {
- if d.Kind == metrics.KindFloat64Histogram {
- histograms = append(histograms, metrics.Sample{Name: d.Name})
- }
- }
if len(histograms) > 0 {
metrics.Read(histograms)
}
- bucketsMap := make(map[string][]float64)
- for i := range histograms {
- bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
- }
-
// Generate a Desc and ValueType for each runtime/metrics metric.
metricSet := make([]collectorMetric, 0, len(descriptions))
sampleBuf := make([]metrics.Sample, 0, len(descriptions))
sampleMap := make(map[string]*metrics.Sample, len(descriptions))
for i := range descriptions {
d := &descriptions[i]
! namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
--- 148,165 ----
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
if len(histograms) > 0 {
metrics.Read(histograms)
}
// Generate a Desc and ValueType for each runtime/metrics metric.
metricSet := make([]collectorMetric, 0, len(descriptions))
sampleBuf := make([]metrics.Sample, 0, len(descriptions))
sampleMap := make(map[string]*metrics.Sample, len(descriptions))
for i := range descriptions {
d := &descriptions[i]
! _, _, _, ok := internal.RuntimeMetricsToProm(d)
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
***************
*** 187,224 ****
// Set up sample buffer for reading, and a map
// for quick lookup of sample values.
- sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
- sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
var m collectorMetric
- if d.Kind == metrics.KindFloat64Histogram {
- _, hasSum := rmExactSumMap[d.Name]
- unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
- m = newBatchHistogram(
- NewDesc(
- BuildFQName(namespace, subsystem, name),
- d.Description,
- nil,
- nil,
- ),
- internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
- hasSum,
- )
- } else if d.Cumulative {
- m = NewCounter(CounterOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: name,
- Help: d.Description,
- })
- } else {
- m = NewGauge(GaugeOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: name,
- Help: d.Description,
- })
- }
metricSet = append(metricSet, m)
}
--- 169,176 ----
***************
*** 273,303 ****
}
if c.opt.isEnabled(goRuntimeMetricsCollection) {
- // Collect all our metrics from rmSampleBuf.
- for i, sample := range c.rmSampleBuf {
- // N.B. switch on concrete type because it's significantly more efficient
- // than checking for the Counter and Gauge interface implementations. In
- // this case, we control all the types here.
- switch m := c.rmMetrics[i].(type) {
- case *counter:
- // Guard against decreases. This should never happen, but a failure
- // to do so will result in a panic, which is a harsh consequence for
- // a metrics collection bug.
- v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
- if v1 > v0 {
- m.Add(unwrapScalarRMValue(sample.Value) - m.get())
- }
- m.Collect(ch)
- case *gauge:
- m.Set(unwrapScalarRMValue(sample.Value))
- m.Collect(ch)
- case *batchHistogram:
- m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
- m.Collect(ch)
- default:
- panic("unexpected metric type")
- }
- }
}
// ms is a dummy MemStats that we populate ourselves so that we can
--- 225,230 ----
***************
*** 315,338 ****
// to be scalar and returns the equivalent float64 value. Panics if the
// value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
! switch v.Kind() {
! case metrics.KindUint64:
! return float64(v.Uint64())
! case metrics.KindFloat64:
! return v.Float64()
! case metrics.KindBad:
! // Unsupported metric.
! //
! // This should never happen because we always populate our metric
! // set from the runtime/metrics package.
! panic("unexpected unsupported metric")
! default:
! // Unsupported metric kind.
! //
! // This should never happen because we check for this during initialization
! // and flag and filter metrics whose kinds we don't understand.
! panic("unexpected unsupported metric kind")
! }
}
var rmExactSumMap = map[string]string{
--- 242,248 ----
// to be scalar and returns the equivalent float64 value. Panics if the
// value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
! return 0
}
var rmExactSumMap = map[string]string{
***************
*** 351,368 ****
if !ok {
return 0
}
! s, ok := c.rmSampleMap[sumName]
if !ok {
return 0
}
! return unwrapScalarRMValue(s.Value)
}
func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
lookupOrZero := func(name string) uint64 {
- if s, ok := rm[name]; ok {
- return s.Value.Uint64()
- }
return 0
}
--- 261,275 ----
if !ok {
return 0
}
! _, ok = c.rmSampleMap[sumName]
if !ok {
return 0
}
! return 0
}
func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
lookupOrZero := func(name string) uint64 {
return 0
}
***************
*** 378,408 ****
ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
- ms.Lookups = 0 // Already always zero.
- ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
- ms.Alloc = ms.HeapAlloc
- ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
- ms.HeapObjects = lookupOrZero(goGCHeapObjects)
- ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
- ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
- ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
- ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
- ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
- ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
- ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
- ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
- ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime
// of the process.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
- ms.GCCPUFraction = 0
}
// batchHistogram is a mutable histogram that is updated
--- 285,300 ----
***************
*** 454,479 ****
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
- counts, buckets := his.Counts, his.Buckets
-
- h.mu.Lock()
- defer h.mu.Unlock()
-
- // Clear buckets.
- for i := range h.counts {
- h.counts[i] = 0
- }
- // Copy and reduce buckets.
- var j int
- for i, count := range counts {
- h.counts[j] += count
- if buckets[i+1] == h.buckets[j+1] {
- j++
- }
- }
- if h.hasSum {
- h.sum = sum
- }
}
func (h *batchHistogram) Desc() *Desc {
--- 346,351 ----
diff -c 'vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go' '_vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go'
Index: ./github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
*** ./github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go Thu Oct 26 15:21:07 2023
***************
*** 18,28 ****
import (
"math"
- "path"
"runtime/metrics"
- "strings"
-
- "github.com/prometheus/common/model"
)
// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
--- 18,24 ----
***************
*** 37,80 ****
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
! namespace := "go"
!
! comp := strings.SplitN(d.Name, ":", 2)
! key := comp[0]
! unit := comp[1]
!
! // The last path element in the key is the name,
! // the rest is the subsystem.
! subsystem := path.Dir(key[1:] /* remove leading / */)
! name := path.Base(key)
!
! // subsystem is translated by replacing all / and - with _.
! subsystem = strings.ReplaceAll(subsystem, "/", "_")
! subsystem = strings.ReplaceAll(subsystem, "-", "_")
!
! // unit is translated assuming that the unit contains no
! // non-ASCII characters.
! unit = strings.ReplaceAll(unit, "-", "_")
! unit = strings.ReplaceAll(unit, "*", "_")
! unit = strings.ReplaceAll(unit, "/", "_per_")
!
! // name has - replaced with _ and is concatenated with the unit and
! // other data.
! name = strings.ReplaceAll(name, "-", "_")
! name = name + "_" + unit
! if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
! name = name + "_total"
! }
!
! valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
! switch d.Kind {
! case metrics.KindUint64:
! case metrics.KindFloat64:
! case metrics.KindFloat64Histogram:
! default:
! valid = false
! }
! return namespace, subsystem, name, valid
}
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
--- 33,39 ----
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
! return "fake", "fake", "fake", true
}
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
diff -c 'vendor/github.com/prometheus/client_golang/prometheus/registry.go' '_vendor/github.com/prometheus/client_golang/prometheus/registry.go'
Index: ./github.com/prometheus/client_golang/prometheus/registry.go
*** ./github.com/prometheus/client_golang/prometheus/registry.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/client_golang/prometheus/registry.go Thu Oct 26 15:21:07 2023
***************
*** 59,66 ****
)
func init() {
! MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
! MustRegister(NewGoCollector())
}
// NewRegistry creates a new vanilla Registry without any Collectors
--- 59,66 ----
)
func init() {
! /*MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
! MustRegister(NewGoCollector())*/
}
// NewRegistry creates a new vanilla Registry without any Collectors
***************
*** 575,584 ****
return err
}
! if err := os.Chmod(tmp.Name(), 0644); err != nil {
! return err
! }
! return os.Rename(tmp.Name(), filename)
}
// processMetric is an internal helper method only used by the Gather method.
--- 575,581 ----
return err
}
! return nil
}
// processMetric is an internal helper method only used by the Gather method.
diff -c 'vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go' '_vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go'
Index: ./github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
*** ./github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go Mon Mar 11 17:47:29 2024
***************
*** 17,23 ****
import (
"fmt"
"io"
! "regexp"
"sort"
"strings"
--- 17,23 ----
import (
"fmt"
"io"
! "wasm_parts/regexp"
"sort"
"strings"
diff -c 'vendor/github.com/prometheus/common/model/labels.go' '_vendor/github.com/prometheus/common/model/labels.go'
Index: ./github.com/prometheus/common/model/labels.go
*** ./github.com/prometheus/common/model/labels.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/common/model/labels.go Mon Mar 11 17:56:25 2024
***************
*** 16,22 ****
import (
"encoding/json"
"fmt"
! "regexp"
"strings"
"unicode/utf8"
)
--- 16,22 ----
import (
"encoding/json"
"fmt"
! "wasm_parts/regexp"
"strings"
"unicode/utf8"
)
***************
*** 91,97 ****
// LabelNameRE is a regular expression matching valid label names. Note that the
// IsValid method of LabelName performs the same check but faster than a match
// with this regular expression.
! var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
--- 91,97 ----
// LabelNameRE is a regular expression matching valid label names. Note that the
// IsValid method of LabelName performs the same check but faster than a match
// with this regular expression.
! var LabelNameRE = func() *regexp.Regexp { return regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") }
// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
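#
# Editor's note: this patch turns package-level MustCompile vars into
# functions so pattern compilation is deferred from package init to first
# use, at the cost of recompiling on every call. A standalone sketch of a
# memoized variant using the stdlib (not part of the patch; sync.OnceValue
# needs Go 1.21+):
#
#     package main
#
#     import (
#         "fmt"
#         "regexp"
#         "sync"
#     )
#
#     var labelNameRE = sync.OnceValue(func() *regexp.Regexp {
#         return regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
#     })
#
#     func main() {
#         fmt.Println(labelNameRE().MatchString("job"))  // true
#         fmt.Println(labelNameRE().MatchString("9job")) // false
#     }
#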
diff -c 'vendor/github.com/prometheus/common/model/metric.go' '_vendor/github.com/prometheus/common/model/metric.go'
Index: ./github.com/prometheus/common/model/metric.go
*** ./github.com/prometheus/common/model/metric.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/common/model/metric.go Mon Mar 11 17:57:53 2024
***************
*** 15,21 ****
import (
"fmt"
! "regexp"
"sort"
"strings"
)
--- 15,21 ----
import (
"fmt"
! "wasm_parts/regexp"
"sort"
"strings"
)
***************
*** 24,30 ****
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
! MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
--- 24,30 ----
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
! MetricNameRE = func() *regexp.Regexp { return regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) }
)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
diff -c 'vendor/github.com/prometheus/common/model/silence.go' '_vendor/github.com/prometheus/common/model/silence.go'
Index: ./github.com/prometheus/common/model/silence.go
*** ./github.com/prometheus/common/model/silence.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/common/model/silence.go Mon Mar 11 17:47:29 2024
***************
*** 16,22 ****
import (
"encoding/json"
"fmt"
! "regexp"
"time"
)
--- 16,22 ----
import (
"encoding/json"
"fmt"
! "wasm_parts/regexp"
"time"
)
diff -c 'vendor/github.com/prometheus/common/model/time.go' '_vendor/github.com/prometheus/common/model/time.go'
Index: ./github.com/prometheus/common/model/time.go
*** ./github.com/prometheus/common/model/time.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/common/model/time.go Mon Mar 11 17:52:45 2024
***************
*** 18,24 ****
"errors"
"fmt"
"math"
! "regexp"
"strconv"
"strings"
"time"
--- 18,24 ----
"errors"
"fmt"
"math"
! "wasm_parts/regexp"
"strconv"
"strings"
"time"
***************
*** 183,193 ****
return "duration"
}
! var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
switch durationStr {
case "0":
// Allow 0 without a unit.
--- 183,194 ----
return "duration"
}
!
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
+ var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
switch durationStr {
case "0":
// Allow 0 without a unit.
diff -c 'vendor/github.com/prometheus/procfs/cpuinfo.go' '_vendor/github.com/prometheus/procfs/cpuinfo.go'
Index: ./github.com/prometheus/procfs/cpuinfo.go
*** ./github.com/prometheus/procfs/cpuinfo.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/cpuinfo.go Mon Mar 11 17:47:29 2024
***************
*** 20,26 ****
"bytes"
"errors"
"fmt"
! "regexp"
"strconv"
"strings"
--- 20,26 ----
"bytes"
"errors"
"fmt"
! "wasm_parts/regexp"
"strconv"
"strings"
diff -c 'vendor/github.com/prometheus/procfs/mdstat.go' '_vendor/github.com/prometheus/procfs/mdstat.go'
Index: ./github.com/prometheus/procfs/mdstat.go
*** ./github.com/prometheus/procfs/mdstat.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/mdstat.go Mon Mar 11 17:47:29 2024
***************
*** 16,22 ****
import (
"fmt"
"io/ioutil"
! "regexp"
"strconv"
"strings"
)
--- 16,22 ----
import (
"fmt"
"io/ioutil"
! "wasm_parts/regexp"
"strconv"
"strings"
)
diff -c 'vendor/github.com/prometheus/procfs/proc_fdinfo.go' '_vendor/github.com/prometheus/procfs/proc_fdinfo.go'
Index: ./github.com/prometheus/procfs/proc_fdinfo.go
*** ./github.com/prometheus/procfs/proc_fdinfo.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/proc_fdinfo.go Mon Mar 11 17:47:29 2024
***************
*** 17,23 ****
"bufio"
"bytes"
"fmt"
! "regexp"
"github.com/prometheus/procfs/internal/util"
)
--- 17,23 ----
"bufio"
"bytes"
"fmt"
! "wasm_parts/regexp"
"github.com/prometheus/procfs/internal/util"
)
diff -c 'vendor/github.com/prometheus/procfs/proc_limits.go' '_vendor/github.com/prometheus/procfs/proc_limits.go'
Index: ./github.com/prometheus/procfs/proc_limits.go
*** ./github.com/prometheus/procfs/proc_limits.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/proc_limits.go Mon Mar 11 17:47:20 2024
***************
*** 17,23 ****
"bufio"
"fmt"
"os"
! "regexp"
"strconv"
)
--- 17,23 ----
"bufio"
"fmt"
"os"
! "wasm_parts/regexp"
"strconv"
)
diff -c 'vendor/github.com/prometheus/procfs/proc_smaps.go' '_vendor/github.com/prometheus/procfs/proc_smaps.go'
Index: ./github.com/prometheus/procfs/proc_smaps.go
*** ./github.com/prometheus/procfs/proc_smaps.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/proc_smaps.go Mon Mar 11 17:47:29 2024
***************
*** 20,26 ****
"errors"
"fmt"
"os"
! "regexp"
"strconv"
"strings"
--- 20,26 ----
"errors"
"fmt"
"os"
! "wasm_parts/regexp"
"strconv"
"strings"
diff -c 'vendor/github.com/prometheus/procfs/proc_stat.go' '_vendor/github.com/prometheus/procfs/proc_stat.go'
Index: ./github.com/prometheus/procfs/proc_stat.go
*** ./github.com/prometheus/procfs/proc_stat.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/proc_stat.go Thu Oct 26 15:21:07 2023
***************
*** 16,22 ****
import (
"bytes"
"fmt"
- "os"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
--- 16,21 ----
***************
*** 198,204 ****
// ResidentMemory returns the resident memory size in bytes.
func (s ProcStat) ResidentMemory() int {
! return s.RSS * os.Getpagesize()
}
// StartTime returns the unix timestamp of the process in seconds.
--- 197,203 ----
// ResidentMemory returns the resident memory size in bytes.
func (s ProcStat) ResidentMemory() int {
! return s.RSS * 65536
}
// StartTime returns the unix timestamp of the process in seconds.
diff -c 'vendor/github.com/prometheus/procfs/schedstat.go' '_vendor/github.com/prometheus/procfs/schedstat.go'
Index: ./github.com/prometheus/procfs/schedstat.go
*** ./github.com/prometheus/procfs/schedstat.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/schedstat.go Mon Mar 11 17:47:29 2024
***************
*** 17,23 ****
"bufio"
"errors"
"os"
! "regexp"
"strconv"
)
--- 17,23 ----
"bufio"
"errors"
"os"
! "wasm_parts/regexp"
"strconv"
)
diff -c 'vendor/github.com/prometheus/procfs/slab.go' '_vendor/github.com/prometheus/procfs/slab.go'
Index: ./github.com/prometheus/procfs/slab.go
*** ./github.com/prometheus/procfs/slab.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/slab.go Mon Mar 11 17:47:29 2024
***************
*** 17,23 ****
"bufio"
"bytes"
"fmt"
! "regexp"
"strconv"
"strings"
--- 17,23 ----
"bufio"
"bytes"
"fmt"
! "wasm_parts/regexp"
"strconv"
"strings"
diff -c 'vendor/github.com/prometheus/procfs/zoneinfo.go' '_vendor/github.com/prometheus/procfs/zoneinfo.go'
Index: ./github.com/prometheus/procfs/zoneinfo.go
*** ./github.com/prometheus/procfs/zoneinfo.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/procfs/zoneinfo.go Mon Mar 11 17:47:29 2024
***************
*** 19,25 ****
"bytes"
"fmt"
"io/ioutil"
! "regexp"
"strings"
"github.com/prometheus/procfs/internal/util"
--- 19,25 ----
"bytes"
"fmt"
"io/ioutil"
! "wasm_parts/regexp"
"strings"
"github.com/prometheus/procfs/internal/util"
diff -c 'vendor/github.com/prometheus/prometheus/discovery/registry.go' '_vendor/github.com/prometheus/prometheus/discovery/registry.go'
Index: ./github.com/prometheus/prometheus/discovery/registry.go
*** ./github.com/prometheus/prometheus/discovery/registry.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/prometheus/discovery/registry.go Thu Oct 26 15:21:07 2023
***************
*** 17,23 ****
"errors"
"fmt"
"reflect"
- "sort"
"strconv"
"strings"
"sync"
--- 17,22 ----
***************
*** 58,64 ****
}
func registerConfig(yamlKey string, elemType reflect.Type, config Config) {
! name := config.Name()
if _, ok := configNames[name]; ok {
panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
}
--- 57,63 ----
}
func registerConfig(yamlKey string, elemType reflect.Type, config Config) {
! /*name := config.Name()
if _, ok := configNames[name]; ok {
panic(fmt.Sprintf("discovery: Config named %q is already registered", name))
}
***************
*** 77,83 ****
Name: fieldName,
Type: reflect.SliceOf(elemType),
Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
! }
}
func getConfigType(out reflect.Type) reflect.Type {
--- 76,82 ----
Name: fieldName,
Type: reflect.SliceOf(elemType),
Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`),
! }*/
}
func getConfigType(out reflect.Type) reflect.Type {
diff -c 'vendor/github.com/prometheus/prometheus/promql/engine.go' '_vendor/github.com/prometheus/prometheus/promql/engine.go'
Index: ./github.com/prometheus/prometheus/promql/engine.go
*** ./github.com/prometheus/prometheus/promql/engine.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/prometheus/promql/engine.go Mon Mar 11 11:26:55 2024
***************
*** 20,26 ****
"errors"
"fmt"
"math"
- "reflect"
"runtime"
"sort"
"strconv"
--- 20,25 ----
***************
*** 30,47 ****
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/regexp"
- "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
- "github.com/prometheus/prometheus/util/stats"
)
const (
--- 29,41 ----
***************
*** 57,73 ****
minInt64 = -9223372036854775808
)
- type engineMetrics struct {
- currentQueries prometheus.Gauge
- maxConcurrentQueries prometheus.Gauge
- queryLogEnabled prometheus.Gauge
- queryLogFailures prometheus.Counter
- queryQueueTime prometheus.Observer
- queryPrepareTime prometheus.Observer
- queryInnerEval prometheus.Observer
- queryResultSort prometheus.Observer
- }
-
// convertibleToInt64 returns true if v does not over-/underflow an int64.
func convertibleToInt64(v float64) bool {
return v <= maxInt64 && v >= minInt64
--- 51,56 ----
***************
*** 117,124 ****
Close()
// Statement returns the parsed statement of the query.
Statement() parser.Statement
- // Stats returns statistics about the lifetime of the query.
- Stats() *stats.Statistics
// Cancel signals that a running query execution should be aborted.
Cancel()
// String returns the original query string.
--- 100,105 ----
***************
*** 138,147 ****
q string
// Statement of the parsed query.
stmt parser.Statement
- // Timer stats for the query execution.
- stats *stats.QueryTimers
- // Sample stats for the query execution.
- sampleStats *stats.QuerySamples
// Result matrix for reuse.
matrix Matrix
// Cancellation function for the query.
--- 119,124 ----
***************
*** 165,178 ****
return q.q
}
- // Stats implements the Query interface.
- func (q *query) Stats() *stats.Statistics {
- return &stats.Statistics{
- Timers: q.stats,
- Samples: q.sampleStats,
- }
- }
-
// Cancel implements the Query interface.
func (q *query) Cancel() {
if q.cancel != nil {
--- 142,147 ----
***************
*** 189,197 ****
// Exec implements the Query interface.
func (q *query) Exec(ctx context.Context) *Result {
- if span := trace.SpanFromContext(ctx); span != nil {
- span.SetAttributes(attribute.String(queryTag, q.stmt.String()))
- }
// Exec query.
res, warnings, err := q.ng.exec(ctx, q)
--- 158,163 ----
***************
*** 240,246 ****
// EngineOpts contains configuration options used when creating a new Engine.
type EngineOpts struct {
Logger log.Logger
- Reg prometheus.Registerer
MaxSamples int
Timeout time.Duration
ActiveQueryTracker QueryTracker
--- 206,211 ----
***************
*** 273,279 ****
// It is connected to a querier.
type Engine struct {
logger log.Logger
- metrics *engineMetrics
timeout time.Duration
maxSamplesPerQuery int
activeQueryTracker QueryTracker
--- 238,243 ----
***************
*** 292,344 ****
opts.Logger = log.NewNopLogger()
}
- queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: "query_duration_seconds",
- Help: "Query timings",
- Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
- },
- []string{"slice"},
- )
-
- metrics := &engineMetrics{
- currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: "queries",
- Help: "The current number of queries being executed or waiting.",
- }),
- queryLogEnabled: prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: "query_log_enabled",
- Help: "State of the query log.",
- }),
- queryLogFailures: prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: "query_log_failures_total",
- Help: "The number of query log failures.",
- }),
- maxConcurrentQueries: prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: subsystem,
- Name: "queries_concurrent_max",
- Help: "The max number of concurrent queries.",
- }),
- queryQueueTime: queryResultSummary.WithLabelValues("queue_time"),
- queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"),
- queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"),
- queryResultSort: queryResultSummary.WithLabelValues("result_sort"),
- }
-
- if t := opts.ActiveQueryTracker; t != nil {
- metrics.maxConcurrentQueries.Set(float64(t.GetMaxConcurrent()))
- } else {
- metrics.maxConcurrentQueries.Set(-1)
- }
-
if opts.LookbackDelta == 0 {
opts.LookbackDelta = defaultLookbackDelta
if l := opts.Logger; l != nil {
--- 256,261 ----
***************
*** 346,365 ****
}
}
- if opts.Reg != nil {
- opts.Reg.MustRegister(
- metrics.currentQueries,
- metrics.maxConcurrentQueries,
- metrics.queryLogEnabled,
- metrics.queryLogFailures,
- queryResultSummary,
- )
- }
-
return &Engine{
timeout: opts.Timeout,
logger: opts.Logger,
- metrics: metrics,
maxSamplesPerQuery: opts.MaxSamples,
activeQueryTracker: opts.ActiveQueryTracker,
lookbackDelta: opts.LookbackDelta,
--- 263,271 ----
***************
*** 385,396 ****
}
ng.queryLogger = l
-
- if l != nil {
- ng.metrics.queryLogEnabled.Set(1)
- } else {
- ng.metrics.queryLogEnabled.Set(0)
- }
}
// NewInstantQuery returns an evaluation query for the given expression at the given time.
--- 291,296 ----
***************
*** 446,453 ****
qry := &query{
stmt: es,
ng: ng,
- stats: stats.NewQueryTimers(),
- sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats),
queryable: q,
}
return qry, nil
--- 346,351 ----
***************
*** 514,521 ****
q: "test statement",
stmt: parser.TestStmt(f),
ng: ng,
- stats: stats.NewQueryTimers(),
- sampleStats: stats.NewQuerySamples(ng.enablePerStepStats),
}
return qry
}
--- 412,417 ----
***************
*** 525,532 ****
// At this point per query only one EvalStmt is evaluated. Alert and record
// statements are not handled by the Engine.
func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) {
- ng.metrics.currentQueries.Inc()
- defer ng.metrics.currentQueries.Dec()
ctx, cancel := context.WithTimeout(ctx, ng.timeout)
q.cancel = cancel
--- 421,426 ----
***************
*** 546,589 ****
if err != nil {
f = append(f, "error", err)
}
- f = append(f, "stats", stats.NewQueryStats(q.Stats()))
- if span := trace.SpanFromContext(ctx); span != nil {
- f = append(f, "spanID", span.SpanContext().SpanID())
- }
if origin := ctx.Value(QueryOrigin{}); origin != nil {
for k, v := range origin.(map[string]interface{}) {
f = append(f, k, v)
}
}
if err := l.Log(f...); err != nil {
- ng.metrics.queryLogFailures.Inc()
level.Error(ng.logger).Log("msg", "can't log query", "err", err)
}
}
ng.queryLoggerLock.RUnlock()
}()
- execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime)
- defer execSpanTimer.Finish()
- queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime)
// Log query in active log. The active log guarantees that we don't run over
// MaxConcurrent queries.
if ng.activeQueryTracker != nil {
queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q)
if err != nil {
- queueSpanTimer.Finish()
return nil, nil, contextErr(err, "query queue")
}
defer ng.activeQueryTracker.Delete(queryIndex)
}
- queueSpanTimer.Finish()
// Cancel when execution is done or an error was raised.
defer q.cancel()
- evalSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.EvalTotalTime)
- defer evalSpanTimer.Finish()
// The base context might already be canceled on the first iteration (e.g. during shutdown).
if err := contextDone(ctx, env); err != nil {
--- 440,471 ----
***************
*** 610,631 ****
// execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) {
! prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime)
mint, maxt := ng.findMinMaxTime(s)
querier, err := query.queryable.Querier(ctxPrepare, mint, maxt)
if err != nil {
- prepareSpanTimer.Finish()
return nil, nil, err
}
defer querier.Close()
ng.populateSeries(querier, s)
- prepareSpanTimer.Finish()
// Modify the offset of vector and matrix selectors for the @ modifier
// w.r.t. the start time since only 1 evaluation will be done on them.
setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
! evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
// Instant evaluation. This is executed as a range evaluation with one step.
if s.Start == s.End && s.Interval == 0 {
start := timeMilliseconds(s.Start)
--- 492,511 ----
// execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) {
! ctxPrepare := ctx
mint, maxt := ng.findMinMaxTime(s)
querier, err := query.queryable.Querier(ctxPrepare, mint, maxt)
if err != nil {
return nil, nil, err
}
defer querier.Close()
ng.populateSeries(querier, s)
// Modify the offset of vector and matrix selectors for the @ modifier
// w.r.t. the start time since only 1 evaluation will be done on them.
setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
! ctxInnerEval := ctx
// Instant evaluation. This is executed as a range evaluation with one step.
if s.Start == s.End && s.Interval == 0 {
start := timeMilliseconds(s.Start)
***************
*** 637,654 ****
maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger,
lookbackDelta: ng.lookbackDelta,
- samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
}
- query.sampleStats.InitStepTracking(start, start, 1)
val, warnings, err := evaluator.Eval(s.Expr)
if err != nil {
return nil, warnings, err
}
- evalSpanTimer.Finish()
-
var mat Matrix
switch result := val.(type) {
--- 517,530 ----
***************
*** 689,703 ****
maxSamples: ng.maxSamplesPerQuery,
logger: ng.logger,
lookbackDelta: ng.lookbackDelta,
- samplesStats: query.sampleStats,
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
}
- query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval)
val, warnings, err := evaluator.Eval(s.Expr)
if err != nil {
return nil, warnings, err
}
- evalSpanTimer.Finish()
mat, ok := val.(Matrix)
if !ok {
--- 565,576 ----
***************
*** 710,718 ****
}
// TODO(fabxc): where to ensure metric labels are a copy from the storage internals.
- sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort)
sort.Sort(mat)
- sortSpanTimer.Finish()
return mat, warnings, nil
}
--- 583,589 ----
***************
*** 922,928 ****
currentSamples int
logger log.Logger
lookbackDelta time.Duration
- samplesStats *stats.QuerySamples
noStepSubqueryIntervalFn func(rangeMillis int64) int64
}
--- 793,798 ----
***************
*** 1115,1121 ****
}
}
args[i] = vectors[i]
- ev.samplesStats.UpdatePeak(ev.currentSamples)
}
// Make the function call.
--- 985,990 ----
***************
*** 1131,1142 ****
// When we reset currentSamples to tempNumSamples during the next iteration of the loop it also
// needs to include the samples from the result here, as they're still in memory.
tempNumSamples += len(result)
- ev.samplesStats.UpdatePeak(ev.currentSamples)
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
- ev.samplesStats.UpdatePeak(ev.currentSamples)
// If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp {
--- 1000,1009 ----
***************
*** 1146,1152 ****
mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}}
}
ev.currentSamples = originalNumSamples + mat.TotalSamples()
- ev.samplesStats.UpdatePeak(ev.currentSamples)
return mat, warnings
}
--- 1013,1018 ----
***************
*** 1179,1198 ****
mat = append(mat, ss)
}
ev.currentSamples = originalNumSamples + mat.TotalSamples()
- ev.samplesStats.UpdatePeak(ev.currentSamples)
return mat, warnings
}
// evalSubquery evaluates given SubqueryExpr and returns an equivalent
// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) {
- samplesStats := ev.samplesStats
- // Avoid double counting samples when running a subquery, those samples will be counted in later stage.
- ev.samplesStats = ev.samplesStats.NewChild()
val, ws := ev.eval(subq)
// But do incorporate the peak from the subquery
- samplesStats.UpdatePeakFromSubquery(ev.samplesStats)
- ev.samplesStats = samplesStats
mat := val.(Matrix)
vs := &parser.VectorSelector{
OriginalOffset: subq.OriginalOffset,
--- 1045,1058 ----
***************
*** 1227,1235 ****
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
// Create a new span to help investigate inner evaluation performances.
! ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String())
ev.ctx = ctxWithSpan
- defer span.End()
switch e := expr.(type) {
case *parser.AggregateExpr:
--- 1087,1094 ----
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
// Create a new span to help investigate inner evaluation performances.
! ctxWithSpan := ev.ctx
ev.ctx = ctxWithSpan
switch e := expr.(type) {
case *parser.AggregateExpr:
***************
*** 1398,1404 ****
enh.Ts = ts
// Make the function call.
outVec := call(inArgs, e.Args, enh)
- ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points)))
enh.Out = outVec[:0]
if len(outVec) > 0 {
ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts})
--- 1257,1262 ----
***************
*** 1416,1424 ****
} else {
putPointSlice(ss.Points)
}
- ev.samplesStats.UpdatePeak(ev.currentSamples)
}
- ev.samplesStats.UpdatePeak(ev.currentSamples)
ev.currentSamples -= len(points)
putPointSlice(points)
--- 1274,1280 ----
***************
*** 1558,1564 ****
if ok {
if ev.currentSamples < ev.maxSamples {
ss.Points = append(ss.Points, Point{V: v, T: ts})
- ev.samplesStats.IncrementSamplesAtStep(step, 1)
ev.currentSamples++
} else {
ev.error(ErrTooManySamples(env))
--- 1414,1419 ----
***************
*** 1572,1578 ****
putPointSlice(ss.Points)
}
}
- ev.samplesStats.UpdatePeak(ev.currentSamples)
return mat, ws
case *parser.MatrixSelector:
--- 1427,1432 ----
***************
*** 1591,1597 ****
maxSamples: ev.maxSamples,
logger: ev.logger,
lookbackDelta: ev.lookbackDelta,
- samplesStats: ev.samplesStats.NewChild(),
noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
}
--- 1445,1450 ----
***************
*** 1617,1624 ****
res, ws := newEv.eval(e.Expr)
ev.currentSamples = newEv.currentSamples
- ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
- ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples)
return res, ws
case *parser.StepInvariantExpr:
switch ce := e.Expr.(type) {
--- 1470,1475 ----
***************
*** 1635,1649 ****
maxSamples: ev.maxSamples,
logger: ev.logger,
lookbackDelta: ev.lookbackDelta,
- samplesStats: ev.samplesStats.NewChild(),
noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn,
}
res, ws := newEv.eval(e.Expr)
ev.currentSamples = newEv.currentSamples
- ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats)
for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts = ts + ev.interval {
step++
- ev.samplesStats.IncrementSamplesAtStep(step, newEv.samplesStats.TotalSamples)
}
switch e.Expr.(type) {
case *parser.MatrixSelector, *parser.SubqueryExpr:
--- 1486,1497 ----
***************
*** 1674,1680 ****
}
}
}
- ev.samplesStats.UpdatePeak(ev.currentSamples)
return res, ws
}
--- 1522,1527 ----
***************
*** 1700,1713 ****
})
ev.currentSamples++
- ev.samplesStats.IncrementSamplesAtTimestamp(ts, 1)
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
}
}
- ev.samplesStats.UpdatePeak(ev.currentSamples)
return vec, ws
}
--- 1547,1558 ----
***************
*** 1740,1746 ****
return t, v, true
}
! var pointPool = sync.Pool{}
func getPointSlice(sz int) []Point {
p := pointPool.Get()
--- 1585,1595 ----
return t, v, true
}
! type fakePointPool struct {}
! func (f fakePointPool) Get() interface{} { return nil }
! func (f fakePointPool) Put(x any) { }
!
! var pointPool = fakePointPool{}
func getPointSlice(sz int) []Point {
p := pointPool.Get()
***************
*** 1783,1789 ****
}
ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16))
- ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, int64(len(ss.Points)))
if len(ss.Points) > 0 {
matrix = append(matrix, ss)
--- 1632,1637 ----
***************
*** 1855,1861 ****
ev.currentSamples++
}
}
- ev.samplesStats.UpdatePeak(ev.currentSamples)
return out
}
--- 1703,1708 ----
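#
# Note on the engine.go hunks above: the patch strips three concerns from
# the PromQL engine so it links in a wasm build: the prometheus self-metrics
# (engineMetrics and EngineOpts.Reg), the OpenTelemetry spans, and the
# per-query stats/timers. It also swaps the Point sync.Pool for a no-op
# pool, which is harmless because pool reuse is only an optimization and the
# wasm build is single-threaded. After the patch an engine is constructed
# without a Registerer, roughly (sketch; only fields the patch keeps):
#
#   eng := promql.NewEngine(promql.EngineOpts{
#       Logger:     log.NewNopLogger(),
#       MaxSamples: 1_000_000,
#       Timeout:    30 * time.Second,
#   })
#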
diff -c 'vendor/github.com/prometheus/prometheus/promql/functions.go' '_vendor/github.com/prometheus/prometheus/promql/functions.go'
Index: ./github.com/prometheus/prometheus/promql/functions.go
*** ./github.com/prometheus/prometheus/promql/functions.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/prometheus/promql/functions.go Mon Mar 11 17:57:07 2024
***************
*** 887,893 ****
if err != nil {
panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
}
! if !model.LabelNameRE.MatchString(dst) {
panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
}
enh.Dmn = make(map[uint64]labels.Labels, len(enh.Out))
--- 887,893 ----
if err != nil {
panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
}
! if !model.LabelNameRE().MatchString(dst) {
panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
}
enh.Dmn = make(map[uint64]labels.Labels, len(enh.Out))
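#
# Note on the functions.go hunk above: model.LabelNameRE changes from an
# eagerly compiled package variable to a LabelNameRE() accessor. This
# mirrors the patched prometheus/common/model/labels.go (listed below) and
# defers compiling the regexp until label_replace() actually needs it. A
# minimal sketch of such an accessor, assuming the single-threaded wasm
# build (so no sync.Once is needed):
#
#   var labelNameRE *regexp.Regexp
#
#   func LabelNameRE() *regexp.Regexp {
#       if labelNameRE == nil {
#           labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
#       }
#       return labelNameRE
#   }
#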
diff -c 'vendor/github.com/prometheus/prometheus/promql/parser/parse.go' '_vendor/github.com/prometheus/prometheus/promql/parser/parse.go'
Index: ./github.com/prometheus/prometheus/promql/parser/parse.go
*** ./github.com/prometheus/prometheus/promql/parser/parse.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/prometheus/promql/parser/parse.go Mon Mar 11 11:21:23 2024
***************
*** 21,27 ****
"runtime"
"strconv"
"strings"
- "sync"
"time"
"github.com/prometheus/common/model"
--- 21,26 ----
***************
*** 31,41 ****
"github.com/prometheus/prometheus/util/strutil"
)
! var parserPool = sync.Pool{
! New: func() interface{} {
! return &parser{}
! },
! }
type parser struct {
lex Lexer
--- 30,40 ----
"github.com/prometheus/prometheus/util/strutil"
)
! type fakePool[T any] struct {}
! func (f fakePool[T]) Get() interface{} { return new(T) }
! func (f fakePool[T]) Put(x any) { }
!
! var parserPool = fakePool[parser]{}
type parser struct {
lex Lexer
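#
# Note on the parse.go hunks above: sync.Pool becomes a generic no-op pool
# whose Get always allocates via new(T) and whose Put discards the value.
# That is contract-compatible with sync.Pool (a pool may drop items at any
# time), so call sites keep their shape (sketch of the usual upstream
# pattern):
#
#   p := parserPool.Get().(*parser) // after the patch: always a fresh value
#   defer parserPool.Put(p)
#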
diff -c 'vendor/github.com/prometheus/prometheus/storage/generic.go' '_vendor/github.com/prometheus/prometheus/storage/generic.go'
Index: ./github.com/prometheus/prometheus/storage/generic.go
*** ./github.com/prometheus/prometheus/storage/generic.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/prometheus/storage/generic.go Tue Nov 7 15:29:20 2023
***************
*** 105,134 ****
return &chunkSeriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)}
}
- type seriesMergerAdapter struct {
- VerticalSeriesMergeFunc
- }
-
- func (a *seriesMergerAdapter) Merge(s ...Labels) Labels {
- buf := make([]Series, 0, len(s))
- for _, ser := range s {
- buf = append(buf, ser.(Series))
- }
- return a.VerticalSeriesMergeFunc(buf...)
- }
-
- type chunkSeriesMergerAdapter struct {
- VerticalChunkSeriesMergeFunc
- }
-
- func (a *chunkSeriesMergerAdapter) Merge(s ...Labels) Labels {
- buf := make([]ChunkSeries, 0, len(s))
- for _, ser := range s {
- buf = append(buf, ser.(ChunkSeries))
- }
- return a.VerticalChunkSeriesMergeFunc(buf...)
- }
-
type noopGenericSeriesSet struct{}
func (noopGenericSeriesSet) Next() bool { return false }
--- 105,110 ----
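#
# Note on the generic.go hunk above: the two merger adapters are deleted
# because storage/merge.go, which defined the Vertical*MergeFunc types they
# wrap, is removed wholesale by this patch (see the file list below);
# keeping the adapters would leave dangling references to deleted types.
#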
diff -c 'vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go' '_vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go'
Index: ./github.com/prometheus/prometheus/tsdb/chunks/chunks.go
*** ./github.com/prometheus/prometheus/tsdb/chunks/chunks.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/prometheus/tsdb/chunks/chunks.go Tue Nov 7 15:39:45 2023
***************
*** 14,55 ****
package chunks
import (
- "bufio"
- "encoding/binary"
- "fmt"
- "hash"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "strconv"
-
- "github.com/pkg/errors"
-
"github.com/prometheus/prometheus/tsdb/chunkenc"
- tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
- "github.com/prometheus/prometheus/tsdb/fileutil"
- )
-
- // Segment header fields constants.
- const (
- // MagicChunks is 4 bytes at the head of a series file.
- MagicChunks = 0x85BD40DD
- // MagicChunksSize is the size in bytes of MagicChunks.
- MagicChunksSize = 4
- chunksFormatV1 = 1
- ChunksFormatVersionSize = 1
- segmentHeaderPaddingSize = 3
- // SegmentHeaderSize defines the total size of the header part.
- SegmentHeaderSize = MagicChunksSize + ChunksFormatVersionSize + segmentHeaderPaddingSize
- )
-
- // Chunk fields constants.
- const (
- // MaxChunkLengthFieldSize defines the maximum size of the data length part.
- MaxChunkLengthFieldSize = binary.MaxVarintLen32
- // ChunkEncodingSize defines the size of the chunk encoding part.
- ChunkEncodingSize = 1
)
// ChunkRef is a generic reference for reading chunk data. In prometheus it
--- 14,20 ----
***************
*** 57,114 ****
// may have their own reference types.
type ChunkRef uint64
- // HeadSeriesRef refers to in-memory series.
- type HeadSeriesRef uint64
-
- // HeadChunkRef packs a HeadSeriesRef and a ChunkID into a global 8 Byte ID.
- // The HeadSeriesRef and ChunkID may not exceed 5 and 3 bytes respectively.
- type HeadChunkRef uint64
-
- func NewHeadChunkRef(hsr HeadSeriesRef, chunkID HeadChunkID) HeadChunkRef {
- if hsr > (1<<40)-1 {
- panic("series ID exceeds 5 bytes")
- }
- if chunkID > (1<<24)-1 {
- panic("chunk ID exceeds 3 bytes")
- }
- return HeadChunkRef(uint64(hsr<<24) | uint64(chunkID))
- }
-
- func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
- return HeadSeriesRef(p >> 24), HeadChunkID(p<<40) >> 40
- }
-
- // HeadChunkID refers to a specific chunk in a series (memSeries) in the Head.
- // Each memSeries has its own monotonically increasing number to refer to its chunks.
- // If the HeadChunkID value is...
- // * memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk.
- // * less than the above, but >= memSeries.firstID, then it's
- // memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstID.
- // Example:
- // assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
- // | HeadChunkID value | refers to ... |
- // |-------------------|----------------------------------------------------------------------------------------|
- // | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head |
- // | 7-11 | memSeries.mmappedChunks[i] where i is 0 to 4. |
- // | 12 | memSeries.headChunk |
- type HeadChunkID uint64
-
- // BlockChunkRef refers to a chunk within a persisted block.
- // The upper 4 bytes are for the segment index and
- // the lower 4 bytes are for the segment offset where the data starts for this chunk.
- type BlockChunkRef uint64
-
- // NewBlockChunkRef packs the file index and byte offset into a BlockChunkRef.
- func NewBlockChunkRef(fileIndex, fileOffset uint64) BlockChunkRef {
- return BlockChunkRef(fileIndex<<32 | fileOffset)
- }
-
- func (b BlockChunkRef) Unpack() (int, int) {
- sgmIndex := int(b >> 32)
- chkStart := int((b << 32) >> 32)
- return sgmIndex, chkStart
- }
-
// Meta holds information about a chunk of data.
type Meta struct {
// Ref and Chunk hold either a reference that can be used to retrieve
--- 22,27 ----
***************
*** 132,636 ****
// Err returns optional error if Next is false.
Err() error
}
-
- // writeHash writes the chunk encoding and raw data into the provided hash.
- func (cm *Meta) writeHash(h hash.Hash, buf []byte) error {
- buf = append(buf[:0], byte(cm.Chunk.Encoding()))
- if _, err := h.Write(buf[:1]); err != nil {
- return err
- }
- if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
- return err
- }
- return nil
- }
-
- // OverlapsClosedInterval Returns true if the chunk overlaps [mint, maxt].
- func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
- // The chunk itself is a closed interval [cm.MinTime, cm.MaxTime].
- return cm.MinTime <= maxt && mint <= cm.MaxTime
- }
-
- var errInvalidSize = fmt.Errorf("invalid size")
-
- var castagnoliTable *crc32.Table
-
- func init() {
- castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
- }
-
- // newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
- // polynomial may be easily changed in one location at a later time, if necessary.
- func newCRC32() hash.Hash32 {
- return crc32.New(castagnoliTable)
- }
-
- // Check if the CRC of data matches that stored in sum, computed when the chunk was stored.
- func checkCRC32(data, sum []byte) error {
- got := crc32.Checksum(data, castagnoliTable)
- // This combination of shifts is the inverse of digest.Sum() in go/src/hash/crc32.
- want := uint32(sum[0])<<24 + uint32(sum[1])<<16 + uint32(sum[2])<<8 + uint32(sum[3])
- if got != want {
- return errors.Errorf("checksum mismatch expected:%x, actual:%x", want, got)
- }
- return nil
- }
-
- // Writer implements the ChunkWriter interface for the standard
- // serialization format.
- type Writer struct {
- dirFile *os.File
- files []*os.File
- wbuf *bufio.Writer
- n int64
- crc32 hash.Hash
- buf [binary.MaxVarintLen32]byte
-
- segmentSize int64
- }
-
- const (
- // DefaultChunkSegmentSize is the default chunks segment size.
- DefaultChunkSegmentSize = 512 * 1024 * 1024
- )
-
- // NewWriterWithSegSize returns a new writer against the given directory
- // and allows setting a custom size for the segments.
- func NewWriterWithSegSize(dir string, segmentSize int64) (*Writer, error) {
- return newWriter(dir, segmentSize)
- }
-
- // NewWriter returns a new writer against the given directory
- // using the default segment size.
- func NewWriter(dir string) (*Writer, error) {
- return newWriter(dir, DefaultChunkSegmentSize)
- }
-
- func newWriter(dir string, segmentSize int64) (*Writer, error) {
- if segmentSize <= 0 {
- segmentSize = DefaultChunkSegmentSize
- }
-
- if err := os.MkdirAll(dir, 0o777); err != nil {
- return nil, err
- }
- dirFile, err := fileutil.OpenDir(dir)
- if err != nil {
- return nil, err
- }
- return &Writer{
- dirFile: dirFile,
- n: 0,
- crc32: newCRC32(),
- segmentSize: segmentSize,
- }, nil
- }
-
- func (w *Writer) tail() *os.File {
- if len(w.files) == 0 {
- return nil
- }
- return w.files[len(w.files)-1]
- }
-
- // finalizeTail writes all pending data to the current tail file,
- // truncates its size, and closes it.
- func (w *Writer) finalizeTail() error {
- tf := w.tail()
- if tf == nil {
- return nil
- }
-
- if err := w.wbuf.Flush(); err != nil {
- return err
- }
- if err := tf.Sync(); err != nil {
- return err
- }
- // As the file was pre-allocated, we truncate any superfluous zero bytes.
- off, err := tf.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- if err := tf.Truncate(off); err != nil {
- return err
- }
-
- return tf.Close()
- }
-
- func (w *Writer) cut() error {
- // Sync current tail to disk and close.
- if err := w.finalizeTail(); err != nil {
- return err
- }
-
- n, f, _, err := cutSegmentFile(w.dirFile, MagicChunks, chunksFormatV1, w.segmentSize)
- if err != nil {
- return err
- }
- w.n = int64(n)
-
- w.files = append(w.files, f)
- if w.wbuf != nil {
- w.wbuf.Reset(f)
- } else {
- w.wbuf = bufio.NewWriterSize(f, 8*1024*1024)
- }
-
- return nil
- }
-
- func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) {
- p, seq, err := nextSequenceFile(dirFile.Name())
- if err != nil {
- return 0, nil, 0, errors.Wrap(err, "next sequence file")
- }
- ptmp := p + ".tmp"
- f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666)
- if err != nil {
- return 0, nil, 0, errors.Wrap(err, "open temp file")
- }
- defer func() {
- if returnErr != nil {
- errs := tsdb_errors.NewMulti(returnErr)
- if f != nil {
- errs.Add(f.Close())
- }
- // Calling RemoveAll on a non-existent file does not return error.
- errs.Add(os.RemoveAll(ptmp))
- returnErr = errs.Err()
- }
- }()
- if allocSize > 0 {
- if err = fileutil.Preallocate(f, allocSize, true); err != nil {
- return 0, nil, 0, errors.Wrap(err, "preallocate")
- }
- }
- if err = dirFile.Sync(); err != nil {
- return 0, nil, 0, errors.Wrap(err, "sync directory")
- }
-
- // Write header metadata for new file.
- metab := make([]byte, SegmentHeaderSize)
- binary.BigEndian.PutUint32(metab[:MagicChunksSize], magicNumber)
- metab[4] = chunksFormat
-
- n, err := f.Write(metab)
- if err != nil {
- return 0, nil, 0, errors.Wrap(err, "write header")
- }
- if err := f.Close(); err != nil {
- return 0, nil, 0, errors.Wrap(err, "close temp file")
- }
- f = nil
-
- if err := fileutil.Rename(ptmp, p); err != nil {
- return 0, nil, 0, errors.Wrap(err, "replace file")
- }
-
- f, err = os.OpenFile(p, os.O_WRONLY, 0o666)
- if err != nil {
- return 0, nil, 0, errors.Wrap(err, "open final file")
- }
- // Skip header for further writes.
- if _, err := f.Seek(int64(n), 0); err != nil {
- return 0, nil, 0, errors.Wrap(err, "seek in final file")
- }
- return n, f, seq, nil
- }
-
- func (w *Writer) write(b []byte) error {
- n, err := w.wbuf.Write(b)
- w.n += int64(n)
- return err
- }
-
- // WriteChunks writes as many chunks as possible to the current segment,
- // cuts a new segment when the current segment is full and
- // writes the rest of the chunks in the new segment.
- func (w *Writer) WriteChunks(chks ...Meta) error {
- var (
- batchSize = int64(0)
- batchStart = 0
- batches = make([][]Meta, 1)
- batchID = 0
- firstBatch = true
- )
-
- for i, chk := range chks {
- // Each chunk contains: data length + encoding + the data itself + crc32
- chkSize := int64(MaxChunkLengthFieldSize) // The data length is a variable length field so use the maximum possible value.
- chkSize += ChunkEncodingSize // The chunk encoding.
- chkSize += int64(len(chk.Chunk.Bytes())) // The data itself.
- chkSize += crc32.Size // The 4 bytes of crc32.
- batchSize += chkSize
-
- // Cut a new batch when it is not the first chunk(to avoid empty segments) and
- // the batch is too large to fit in the current segment.
- cutNewBatch := (i != 0) && (batchSize+SegmentHeaderSize > w.segmentSize)
-
- // When the segment already has some data than
- // the first batch size calculation should account for that.
- if firstBatch && w.n > SegmentHeaderSize {
- cutNewBatch = batchSize+w.n > w.segmentSize
- if cutNewBatch {
- firstBatch = false
- }
- }
-
- if cutNewBatch {
- batchStart = i
- batches = append(batches, []Meta{})
- batchID++
- batchSize = chkSize
- }
- batches[batchID] = chks[batchStart : i+1]
- }
-
- // Create a new segment when one doesn't already exist.
- if w.n == 0 {
- if err := w.cut(); err != nil {
- return err
- }
- }
-
- for i, chks := range batches {
- if err := w.writeChunks(chks); err != nil {
- return err
- }
- // Cut a new segment only when there are more chunks to write.
- // Avoid creating a new empty segment at the end of the write.
- if i < len(batches)-1 {
- if err := w.cut(); err != nil {
- return err
- }
- }
- }
- return nil
- }
-
- // writeChunks writes the chunks into the current segment irrespective
- // of the configured segment size limit. A segment should have been already
- // started before calling this.
- func (w *Writer) writeChunks(chks []Meta) error {
- if len(chks) == 0 {
- return nil
- }
-
- seq := uint64(w.seq())
- for i := range chks {
- chk := &chks[i]
-
- chk.Ref = ChunkRef(NewBlockChunkRef(seq, uint64(w.n)))
-
- n := binary.PutUvarint(w.buf[:], uint64(len(chk.Chunk.Bytes())))
-
- if err := w.write(w.buf[:n]); err != nil {
- return err
- }
- w.buf[0] = byte(chk.Chunk.Encoding())
- if err := w.write(w.buf[:1]); err != nil {
- return err
- }
- if err := w.write(chk.Chunk.Bytes()); err != nil {
- return err
- }
-
- w.crc32.Reset()
- if err := chk.writeHash(w.crc32, w.buf[:]); err != nil {
- return err
- }
- if err := w.write(w.crc32.Sum(w.buf[:0])); err != nil {
- return err
- }
- }
- return nil
- }
-
- func (w *Writer) seq() int {
- return len(w.files) - 1
- }
-
- func (w *Writer) Close() error {
- if err := w.finalizeTail(); err != nil {
- return err
- }
-
- // close dir file (if not windows platform will fail on rename)
- return w.dirFile.Close()
- }
-
- // ByteSlice abstracts a byte slice.
- type ByteSlice interface {
- Len() int
- Range(start, end int) []byte
- }
-
- type realByteSlice []byte
-
- func (b realByteSlice) Len() int {
- return len(b)
- }
-
- func (b realByteSlice) Range(start, end int) []byte {
- return b[start:end]
- }
-
- // Reader implements a ChunkReader for a serialized byte stream
- // of series data.
- type Reader struct {
- // The underlying bytes holding the encoded series data.
- // Each slice holds the data for a different segment.
- bs []ByteSlice
- cs []io.Closer // Closers for resources behind the byte slices.
- size int64 // The total size of bytes in the reader.
- pool chunkenc.Pool
- }
-
- func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) {
- cr := Reader{pool: pool, bs: bs, cs: cs}
- for i, b := range cr.bs {
- if b.Len() < SegmentHeaderSize {
- return nil, errors.Wrapf(errInvalidSize, "invalid segment header in segment %d", i)
- }
- // Verify magic number.
- if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks {
- return nil, errors.Errorf("invalid magic number %x", m)
- }
-
- // Verify chunk format version.
- if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
- return nil, errors.Errorf("invalid chunk format version %d", v)
- }
- cr.size += int64(b.Len())
- }
- return &cr, nil
- }
-
- // NewDirReader returns a new Reader against sequentially numbered files in the
- // given directory.
- func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
- files, err := sequenceFiles(dir)
- if err != nil {
- return nil, err
- }
- if pool == nil {
- pool = chunkenc.NewPool()
- }
-
- var (
- bs []ByteSlice
- cs []io.Closer
- )
- for _, fn := range files {
- f, err := fileutil.OpenMmapFile(fn)
- if err != nil {
- return nil, tsdb_errors.NewMulti(
- errors.Wrap(err, "mmap files"),
- tsdb_errors.CloseAll(cs),
- ).Err()
- }
- cs = append(cs, f)
- bs = append(bs, realByteSlice(f.Bytes()))
- }
-
- reader, err := newReader(bs, cs, pool)
- if err != nil {
- return nil, tsdb_errors.NewMulti(
- err,
- tsdb_errors.CloseAll(cs),
- ).Err()
- }
- return reader, nil
- }
-
- func (s *Reader) Close() error {
- return tsdb_errors.CloseAll(s.cs)
- }
-
- // Size returns the size of the chunks.
- func (s *Reader) Size() int64 {
- return s.size
- }
-
- // Chunk returns a chunk from a given reference.
- func (s *Reader) Chunk(ref ChunkRef) (chunkenc.Chunk, error) {
- sgmIndex, chkStart := BlockChunkRef(ref).Unpack()
-
- if sgmIndex >= len(s.bs) {
- return nil, errors.Errorf("segment index %d out of range", sgmIndex)
- }
-
- sgmBytes := s.bs[sgmIndex]
-
- if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() {
- return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
- }
- // With the minimum chunk length this should never cause us reading
- // over the end of the slice.
- c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize)
- chkDataLen, n := binary.Uvarint(c)
- if n <= 0 {
- return nil, errors.Errorf("reading chunk length failed with %d", n)
- }
-
- chkEncStart := chkStart + n
- chkEnd := chkEncStart + ChunkEncodingSize + int(chkDataLen) + crc32.Size
- chkDataStart := chkEncStart + ChunkEncodingSize
- chkDataEnd := chkEnd - crc32.Size
-
- if chkEnd > sgmBytes.Len() {
- return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
- }
-
- sum := sgmBytes.Range(chkDataEnd, chkEnd)
- if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil {
- return nil, err
- }
-
- chkData := sgmBytes.Range(chkDataStart, chkDataEnd)
- chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0]
- return s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
- }
-
- func nextSequenceFile(dir string) (string, int, error) {
- files, err := os.ReadDir(dir)
- if err != nil {
- return "", 0, err
- }
-
- i := uint64(0)
- for _, f := range files {
- j, err := strconv.ParseUint(f.Name(), 10, 64)
- if err != nil {
- continue
- }
- // It is not necessary that we find the files in number order,
- // for example with '1000000' and '200000', '1000000' would come first.
- // Though this is a very very race case, we check anyway for the max id.
- if j > i {
- i = j
- }
- }
- return segmentFile(dir, int(i+1)), int(i + 1), nil
- }
-
- func segmentFile(baseDir string, index int) string {
- return filepath.Join(baseDir, fmt.Sprintf("%0.6d", index))
- }
-
- func sequenceFiles(dir string) ([]string, error) {
- files, err := os.ReadDir(dir)
- if err != nil {
- return nil, err
- }
- var res []string
- for _, fi := range files {
- if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
- continue
- }
- res = append(res, filepath.Join(dir, fi.Name()))
- }
- return res, nil
- }
--- 45,47 ----
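#
# Note on the chunks.go hunks above: the file is cut down to its in-memory
# surface. ChunkRef, Meta and the chunk Iterator remain; the segment
# Writer/Reader, CRC32 verification, head/block reference packing and the
# sequence-file helpers are deleted along with their imports, since the
# wasm build has no filesystem or mmap (the matching fileutil package is
# removed in the file list below).
#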
diff -c 'vendor/github.com/prometheus/prometheus/util/stats/query_stats.go' '_vendor/github.com/prometheus/prometheus/util/stats/query_stats.go'
Index: ./github.com/prometheus/prometheus/util/stats/query_stats.go
*** ./github.com/prometheus/prometheus/util/stats/query_stats.go Mon Mar 11 19:34:50 2024
--- ./github.com/prometheus/prometheus/util/stats/query_stats.go Thu Oct 26 15:21:07 2023
***************
*** 19,26 ****
"fmt"
"github.com/prometheus/client_golang/prometheus"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/trace"
)
// QueryTiming identifies the code area or functionality in which time is spent
--- 19,24 ----
***************
*** 189,219 ****
// SpanTimer unifies tracing and timing, to reduce repetition.
type SpanTimer struct {
- timer *Timer
- observers []prometheus.Observer
- span trace.Span
}
func NewSpanTimer(ctx context.Context, operation string, timer *Timer, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
- ctx, span := otel.Tracer("").Start(ctx, operation)
- timer.Start()
return &SpanTimer{
- timer: timer,
- observers: observers,
-
- span: span,
}, ctx
}
func (s *SpanTimer) Finish() {
- s.timer.Stop()
- s.span.End()
-
- for _, obs := range s.observers {
- obs.Observe(s.timer.ElapsedTime().Seconds())
- }
}
type Statistics struct {
--- 187,202 ----
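#
# Note on the query_stats.go hunks above: SpanTimer keeps its exported
# signatures but loses its fields; NewSpanTimer now returns an empty struct
# plus the unchanged context, and Finish() is a no-op. Call sites compile
# unchanged while timing, tracing and observer updates all disappear.
#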
#### End of Patch data ####
#### ApplyPatch data follows ####
# Data version : 1.0
# Date generated : Mon Mar 11 19:35:07 2024
# Generated by : makepatch 2.03
# Recurse directories : Yes
# Excluded files : (\A|/).*\~\Z
# (\A|/).*\.a\Z
# (\A|/).*\.bak\Z
# (\A|/).*\.BAK\Z
# (\A|/).*\.elc\Z
# (\A|/).*\.exe\Z
# (\A|/).*\.gz\Z
# (\A|/).*\.ln\Z
# (\A|/).*\.o\Z
# (\A|/).*\.obj\Z
# (\A|/).*\.olb\Z
# (\A|/).*\.old\Z
# (\A|/).*\.orig\Z
# (\A|/).*\.rej\Z
# (\A|/).*\.so\Z
# (\A|/).*\.Z\Z
# (\A|/)\.del\-.*\Z
# (\A|/)\.make\.state\Z
# (\A|/)\.nse_depinfo\Z
# (\A|/)core\Z
# (\A|/)tags\Z
# (\A|/)TAGS\Z
# r 'github.com/prometheus/prometheus/util/teststorage/storage.go' 2504 0
# r 'github.com/prometheus/prometheus/tsdb/wal/watcher.go' 19515 0
# r 'github.com/prometheus/prometheus/tsdb/wal/wal.go' 25923 0
# r 'github.com/prometheus/prometheus/tsdb/wal/reader.go' 5511 0
# r 'github.com/prometheus/prometheus/tsdb/wal/live_reader.go' 10173 0
# r 'github.com/prometheus/prometheus/tsdb/wal/checkpoint.go' 9233 0
# r 'github.com/prometheus/prometheus/tsdb/wal.go' 32167 0
# r 'github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go' 2641 0
# r 'github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go' 2672 0
# r 'github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go' 2057 0
# r 'github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go' 4706 0
# r 'github.com/prometheus/prometheus/tsdb/tsdbblockutil.go' 2118 0
# r 'github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go' 9309 0
# r 'github.com/prometheus/prometheus/tsdb/repair.go' 4149 0
# r 'github.com/prometheus/prometheus/tsdb/record/record.go' 8531 0
# r 'github.com/prometheus/prometheus/tsdb/querier.go' 24151 0
# r 'github.com/prometheus/prometheus/tsdb/isolation.go' 7510 0
# r 'github.com/prometheus/prometheus/tsdb/index/postingsstats.go' 1515 0
# r 'github.com/prometheus/prometheus/tsdb/index/postings.go' 23016 0
# r 'github.com/prometheus/prometheus/tsdb/index/index.go' 47541 0
# r 'github.com/prometheus/prometheus/tsdb/head_wal.go' 29645 0
# r 'github.com/prometheus/prometheus/tsdb/head_read.go' 14357 0
# r 'github.com/prometheus/prometheus/tsdb/head_append.go' 18505 0
# r 'github.com/prometheus/prometheus/tsdb/head.go' 54668 0
# r 'github.com/prometheus/prometheus/tsdb/goversion/init.go' 721 0
# r 'github.com/prometheus/prometheus/tsdb/goversion/goversion.go' 771 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/sync_linux.go' 932 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/sync_darwin.go' 830 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/sync.go' 826 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate_other.go' 857 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go' 1403 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate_darwin.go' 1138 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/preallocate.go' 1535 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_windows.go' 1460 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_unix.go' 914 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_js.go' 833 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_arm64.go' 692 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_amd64.go' 692 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/mmap_386.go' 686 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/mmap.go' 1530 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/flock_windows.go' 1111 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/flock_unix.go' 1346 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/flock_solaris.go' 1338 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/flock_plan9.go' 921 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go' 926 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/flock.go' 1358 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go' 3108 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/dir_windows.go' 1465 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/dir_unix.go' 776 0
# r 'github.com/prometheus/prometheus/tsdb/fileutil/dir.go' 915 0
# r 'github.com/prometheus/prometheus/tsdb/exemplar.go' 13312 0
# r 'github.com/prometheus/prometheus/tsdb/errors/errors.go' 2359 0
# r 'github.com/prometheus/prometheus/tsdb/encoding/encoding.go' 7768 0
# r 'github.com/prometheus/prometheus/tsdb/db.go' 54267 0
# r 'github.com/prometheus/prometheus/tsdb/compact.go' 23042 0
# r 'github.com/prometheus/prometheus/tsdb/chunks/queue.go' 4103 0
# r 'github.com/prometheus/prometheus/tsdb/chunks/head_chunks_windows.go' 841 0
# r 'github.com/prometheus/prometheus/tsdb/chunks/head_chunks_other.go' 1027 0
# r 'github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go' 33402 0
# r 'github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go' 7635 0
# r 'github.com/prometheus/prometheus/tsdb/blockwriter.go' 3900 0
# r 'github.com/prometheus/prometheus/tsdb/block.go' 19310 0
# r 'github.com/prometheus/prometheus/tsdb/README.md' 1520 0
# r 'github.com/prometheus/prometheus/tsdb/CHANGELOG.md' 6690 0
# r 'github.com/prometheus/prometheus/tsdb/.gitignore' 10 0
# r 'github.com/prometheus/prometheus/storage/series.go' 8594 0
# r 'github.com/prometheus/prometheus/storage/secondary.go' 3907 0
# r 'github.com/prometheus/prometheus/storage/merge.go' 21223 0
# r 'github.com/prometheus/prometheus/storage/lazy.go' 1868 0
# r 'github.com/prometheus/prometheus/storage/fanout.go' 5846 0
# r 'github.com/prometheus/prometheus/promql/test.go' 20825 0
# r 'github.com/prometheus/prometheus/promql/query_logger.go' 5342 0
# p 'github.com/alecthomas/participle/v2/validate.go' 1075 1698324773 0100664
# p 'github.com/aws/aws-sdk-go/aws/defaults/defaults.go' 6835 1698322867 0100664
# p 'github.com/aws/aws-sdk-go/aws/request/retryer.go' 8830 1698322867 0100664
# p 'github.com/davecgh/go-spew/spew/bypass.go' 4715 1698322867 0100664
# p 'github.com/edsrzf/mmap-go/mmap.go' 3653 1698322867 0100664
# p 'github.com/grafana/regexp/backtrack.go' 8998 1710148638 0100664
# p 'github.com/grafana/regexp/exec.go' 12568 1710148627 0100664
# p 'github.com/grafana/regexp/regexp.go' 38456 1710173669 0100664
# p 'github.com/mwitkow/go-conntrack/dialer_reporter.go' 3458 1698322867 0100664
# p 'github.com/mwitkow/go-conntrack/listener_wrapper.go' 3833 1698322867 0100664
# p 'github.com/pquerna/ffjson/fflib/v1/buffer_pool.go' 2368 1710174996 0100664
# c 'github.com/pquerna/ffjson/inception/decoder.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/decoder_tpl.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/encoder.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/encoder_tpl.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/inception.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/reflect.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/tags.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/template.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/inception/writerstack.go' 0 1698319061 0100664
# c 'github.com/pquerna/ffjson/shared/options.go' 0 1698319061 0100664
# p 'github.com/prometheus/client_golang/prometheus/go_collector.go' 8405 1698322867 0100664
# p 'github.com/prometheus/client_golang/prometheus/go_collector_latest.go' 17751 1698322867 0100664
# p 'github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go' 4867 1698322867 0100664
# p 'github.com/prometheus/client_golang/prometheus/registry.go' 31929 1698322867 0100664
# p 'github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go' 10339 1710172049 0100664
# p 'github.com/prometheus/common/model/labels.go' 6074 1710172585 0100664
# p 'github.com/prometheus/common/model/metric.go' 2873 1710172673 0100664
# p 'github.com/prometheus/common/model/silence.go' 2836 1710172049 0100664
# p 'github.com/prometheus/common/model/time.go' 8095 1710172365 0100664
# p 'github.com/prometheus/procfs/cpuinfo.go' 12479 1710172049 0100664
# p 'github.com/prometheus/procfs/mdstat.go' 8590 1710172049 0100664
# p 'github.com/prometheus/procfs/proc_fdinfo.go' 3523 1710172049 0100664
# p 'github.com/prometheus/procfs/proc_limits.go' 4890 1710172040 0100664
# p 'github.com/prometheus/procfs/proc_smaps.go' 3931 1710172049 0100664
# p 'github.com/prometheus/procfs/proc_stat.go' 6165 1698322867 0100664
# p 'github.com/prometheus/procfs/schedstat.go' 3083 1710172049 0100664
# p 'github.com/prometheus/procfs/slab.go' 3584 1710172049 0100664
# p 'github.com/prometheus/procfs/zoneinfo.go' 6375 1710172049 0100664
# p 'github.com/prometheus/prometheus/discovery/registry.go' 7921 1698322867 0100664
# p 'github.com/prometheus/prometheus/promql/engine.go' 83665 1710149215 0100664
# p 'github.com/prometheus/prometheus/promql/functions.go' 38686 1710172627 0100664
# p 'github.com/prometheus/prometheus/promql/parser/parse.go' 22682 1710148883 0100664
# p 'github.com/prometheus/prometheus/storage/generic.go' 3707 1699363760 0100664
# p 'github.com/prometheus/prometheus/tsdb/chunks/chunks.go' 18283 1699364385 0100664
# p 'github.com/prometheus/prometheus/util/stats/query_stats.go' 8691 1698322867 0100664
# R 'github.com/prometheus/prometheus/util/teststorage'
# R 'github.com/prometheus/prometheus/tsdb/wal'
# R 'github.com/prometheus/prometheus/tsdb/tsdbutil'
# R 'github.com/prometheus/prometheus/tsdb/tombstones'
# R 'github.com/prometheus/prometheus/tsdb/record'
# R 'github.com/prometheus/prometheus/tsdb/index'
# R 'github.com/prometheus/prometheus/tsdb/goversion'
# R 'github.com/prometheus/prometheus/tsdb/fileutil'
# R 'github.com/prometheus/prometheus/tsdb/errors'
# R 'github.com/prometheus/prometheus/tsdb/encoding'
# C 'github.com/pquerna/ffjson/inception' 0 1698319061 040775
# C 'github.com/pquerna/ffjson/shared' 0 1698319061 040775
#### End of ApplyPatch data ####
#### End of Patch kit [created: Mon Mar 11 19:35:07 2024] ####
#### Patch checksum: 5480 166486 19883 ####
#### Checksum: 5614 174052 4911 ####