# tempo/example/docker-compose/etc/tempo-s3-minio.yaml

auth_enabled: false                 # multitenancy disabled; requests do not need an X-Scope-OrgID tenant header

server:
  http_listen_port: 3100            # Tempo's HTTP API port (queries, readiness, metrics)

distributor:
  # This configuration will listen on all ports and protocols that tempo is capable of.
  # The receivers all come from the OpenTelemetry collector; more configuration information
  # can be found at: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
  # For a production deployment you should only enable the receivers you need!
  # (see the commented sketch after this receiver list)
  receivers:
    jaeger:
      protocols:
        thrift_http:
        grpc:
        thrift_binary:
        thrift_compact:
    zipkin:
    otlp:
      protocols:
        http:
        grpc:
    opencensus:
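
  # A minimal sketch of the production-style alternative mentioned above, keeping only the
  # OTLP gRPC receiver (an assumption; keep whichever receivers your clients actually send to):
  #
  # receivers:
  #   otlp:
  #     protocols:
  #       grpc: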

ingester:
  trace_idle_period: 10s          # how long a trace can go without receiving new spans before it is considered complete and flushed
  max_block_bytes: 1_000_000      # cut the head block when it hits this size or ...
  max_block_duration: 5m          #   this much time passes

compactor:
  compaction:
    compaction_window: 1h             # blocks in this time window will be compacted together
    max_block_bytes: 100_000_000      # maximum size of compacted blocks
    block_retention: 1h               # how long to keep blocks in the backend
    compacted_block_retention: 10m    # how long to keep a block after it has been compacted into a larger one
    flush_size_bytes: 5242880         # flush buffered data to the backend once it reaches this size (5 MiB)

storage:
  trace:
    backend: s3                          # backend configuration to use
    block:
      bloom_filter_false_positive: .05   # bloom filter false positive rate. lower values create larger filters but fewer false positives
      index_downsample_bytes: 1000       # number of bytes per index record
      encoding: zstd                     # block encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd
    wal:
      path: /tmp/tempo/wal               # where to store the wal locally
      encoding: none                     # wal encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd
    s3:
      bucket: tempo                      # bucket to store trace data in
      endpoint: minio:9000               # the MinIO container from the accompanying docker-compose setup
      access_key: tempo
      secret_key: supersecret
      insecure: true                     # MinIO is reached over plain HTTP here
      # For using AWS, select the appropriate regional endpoint and region, e.g.:
      # endpoint: s3.dualstack.us-west-2.amazonaws.com
      # region: us-west-2
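      # Note: when pointing at AWS endpoints (which are served over HTTPS), also drop
      # `insecure: true` above.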
    pool:
      max_workers: 100                   # the worker pool mainly drives querying, but is also used for polling the blocklist
      queue_depth: 10000                 # maximum number of work items that can be queued for the pool
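
# A rough sketch of how this file is typically wired into the docker-compose example that
# ships alongside it. The service name, image tag, and mount path below are assumptions;
# see the docker-compose manifest next to this file for the exact wiring.
#
# services:
#   tempo:
#     image: grafana/tempo:latest
#     command: ["-config.file=/etc/tempo.yaml"]
#     volumes:
#       - ./etc/tempo-s3-minio.yaml:/etc/tempo.yaml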