Compare commits

..

2 Commits

Author SHA1 Message Date
5170341b3e Disabled redis when generating static yaml resources except core (#1099)
* Disabled redis when generating static yaml resources except core

* mitigate cloudbuild error
2020-02-03 18:32:57 -08:00
1dbbfd9326 Release 0.9 (#1098) 2020-02-03 16:28:18 -08:00
284 changed files with 7702 additions and 11432 deletions

View File

@ -1,16 +0,0 @@
<!-- Thanks for sending a pull request! Here are some tips for you:
If this is your first time, please read our contributor guidelines: https://github.com/googleforgames/open-match/blob/master/CONTRIBUTING.md and developer guide https://github.com/googleforgames/open-match/blob/master/docs/development.md
-->
**What this PR does / Why we need it**:
**Which issue(s) this PR fixes**:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Closes #<issue number>`, or `Closes (paste link of issue)`.
-->
Closes #
**Special notes for your reviewer**:

View File

@ -171,10 +171,17 @@ linters:
- funlen
- gochecknoglobals
- goconst
- gocritic
- gocyclo
- gofmt
- goimports
- gosec
- interfacer # deprecated - "A tool that suggests interfaces is prone to bad suggestions"
- lll
- prealloc
- scopelint
- staticcheck
- stylecheck
#linters:
# enable-all: true

View File

@ -13,7 +13,7 @@
# limitations under the License.
# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
FROM golang:1.14.0
FROM golang:1.13.4
ENV GO111MODULE=on
WORKDIR /go/src/open-match.dev/open-match

View File

@ -34,13 +34,13 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-stretch" && \
apt-get update -y && apt-get install google-cloud-sdk google-cloud-sdk-app-engine-go -y -qq
# Install Golang
# https://github.com/docker-library/golang/blob/master/1.14/stretch/Dockerfile
# https://github.com/docker-library/golang/blob/master/1.13/stretch/Dockerfile
RUN mkdir -p /toolchain/golang
WORKDIR /toolchain/golang
RUN sudo rm -rf /usr/local/go/
# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
RUN curl -L https://golang.org/dl/go1.14.linux-amd64.tar.gz | sudo tar -C /usr/local -xz
RUN curl -L https://golang.org/dl/go1.13.4.linux-amd64.tar.gz | sudo tar -C /usr/local -xz
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH

178
Makefile
View File

@ -52,7 +52,7 @@
# If you want information on how to edit this file checkout,
# http://makefiletutorial.com/
BASE_VERSION = 1.1.0
BASE_VERSION = 0.9.0
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
@ -67,8 +67,6 @@ MINIKUBE_VERSION = latest
GOLANGCI_VERSION = 1.18.0
KIND_VERSION = 0.5.1
SWAGGERUI_VERSION = 3.24.2
GOOGLE_APIS_VERSION = aba342359b6743353195ca53f944fe71e6fb6cd4
GRPC_GATEWAY_VERSION = 1.14.3
TERRAFORM_VERSION = 0.12.13
CHART_TESTING_VERSION = 2.4.0
@ -79,6 +77,7 @@ ENABLE_SECURITY_HARDENING = 0
GO = GO111MODULE=on go
# Defines the absolute local directory of the open-match project
REPOSITORY_ROOT := $(patsubst %/,%,$(dir $(abspath $(MAKEFILE_LIST))))
GO_BUILD_COMMAND = CGO_ENABLED=0 $(GO) build -a -installsuffix cgo .
BUILD_DIR = $(REPOSITORY_ROOT)/build
TOOLCHAIN_DIR = $(BUILD_DIR)/toolchain
TOOLCHAIN_BIN = $(TOOLCHAIN_DIR)/bin
@ -123,7 +122,7 @@ GCLOUD = gcloud --quiet
OPEN_MATCH_HELM_NAME = open-match
OPEN_MATCH_KUBERNETES_NAMESPACE = open-match
OPEN_MATCH_SECRETS_DIR = $(REPOSITORY_ROOT)/install/helm/open-match/secrets
GCLOUD_ACCOUNT_EMAIL = $(shell gcloud auth list --format yaml | grep ACTIVE -a2 | grep account: | cut -c 10-)
GCLOUD_ACCOUNT_EMAIL = $(shell gcloud auth list --format yaml | grep account: | cut -c 10-)
_GCB_POST_SUBMIT ?= 0
# Latest version triggers builds of :latest images.
_GCB_LATEST_VERSION ?= undefined
@ -197,7 +196,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
CMDS = $(notdir $(wildcard cmd/*))
# Names of the individual images, omitting the openmatch prefix.
IMAGES = $(CMDS) mmf-go-soloduel base-build
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-pool evaluator-go-simple base-build
help:
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
@ -215,9 +214,6 @@ local-cloud-build: gcloud
## "openmatch-" prefix on the image name and tags.
##
list-images:
@echo $(IMAGES)
#######################################
## build-images / build-<image name>-image: builds images locally
##
@ -240,6 +236,12 @@ $(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-buil
build-mmf-go-soloduel-image: docker build-base-build-image
docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
build-mmf-go-pool-image: docker build-base-build-image
docker build -f test/matchfunction/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-pool:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-pool:$(ALTERNATE_TAG) .
build-evaluator-go-simple-image: docker build-base-build-image
docker build -f test/evaluator/Dockerfile -t $(REGISTRY)/openmatch-evaluator-go-simple:$(TAG) -t $(REGISTRY)/openmatch-evaluator-go-simple:$(ALTERNATE_TAG) .
#######################################
## push-images / push-<image name>-image: builds and pushes images to your
## container registry.
@ -285,7 +287,7 @@ $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image): clean-%-image:
#####################################################################################################################
update-chart-deps: build/toolchain/bin/helm$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://charts.helm.sh/stable; $(HELM) dependency update)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com; $(HELM) dependency update)
lint-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/ct$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm; $(HELM) lint $(OPEN_MATCH_HELM_NAME))
@ -362,16 +364,12 @@ install-scale-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EX
# install-ci-chart will install open-match-core with pool based mmf for end-to-end in-cluster test.
install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTENSION) install/helm/open-match/secrets/
$(HELM) upgrade $(OPEN_MATCH_HELM_NAME) $(HELM_UPGRADE_FLAGS) --atomic install/helm/open-match $(HELM_IMAGE_FLAGS) \
--set query.replicas=1,frontend.replicas=1,backend.replicas=1 \
--set evaluator.hostName=open-match-test \
--set evaluator.grpcPort=50509 \
--set evaluator.httpPort=51509 \
--set open-match-core.registrationInterval=200ms \
--set open-match-core.proposalCollectionInterval=200ms \
--set open-match-core.assignedDeleteTimeout=200ms \
--set open-match-core.pendingReleaseTimeout=200ms \
--set open-match-core.queryPageSize=10 \
--set global.gcpProjectId=intentionally-invalid-value \
--set open-match-core.ignoreListTTL=500ms \
--set open-match-customize.enabled=true \
--set open-match-customize.function.enabled=true \
--set open-match-customize.evaluator.enabled=true \
--set open-match-customize.function.image=openmatch-mmf-go-pool \
--set query.replicas=1,frontend.replicas=1,backend.replicas=1,open-match-customize.evaluator.replicas=1,open-match-customize.function.replicas=1 \
--set redis.master.resources.requests.cpu=0.6,redis.master.resources.requests.memory=300Mi \
--set ci=true
@ -383,18 +381,11 @@ delete-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/kubec
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)-demo
ifneq ($(BASE_VERSION), 0.0.0-dev)
install/yaml/: REGISTRY = gcr.io/$(OPEN_MATCH_PUBLIC_IMAGES_PROJECT_ID)
install/yaml/: TAG = $(BASE_VERSION)
endif
install/yaml/: update-chart-deps install/yaml/install.yaml install/yaml/01-open-match-core.yaml install/yaml/02-open-match-demo.yaml install/yaml/03-prometheus-chart.yaml install/yaml/04-grafana-chart.yaml install/yaml/05-jaeger-chart.yaml install/yaml/06-open-match-override-configmap.yaml install/yaml/07-open-match-default-evaluator.yaml
# We have to hard-code the Jaeger endpoints as we are excluding Jaeger, so Helm cannot determine the endpoints from the Jaeger subchart
install/yaml/01-open-match-core.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
--set-string global.telemetry.jaeger.agentEndpoint="$(OPEN_MATCH_HELM_NAME)-jaeger-agent:6831" \
--set-string global.telemetry.jaeger.collectorEndpoint="http://$(OPEN_MATCH_HELM_NAME)-jaeger-collector:14268/api/traces" \
install/helm/open-match > install/yaml/01-open-match-core.yaml
install/yaml/02-open-match-demo.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@ -412,7 +403,6 @@ install/yaml/03-prometheus-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set global.telemetry.prometheus.enabled=true \
install/helm/open-match > install/yaml/03-prometheus-chart.yaml
# We have to hard-code the Prometheus Server URL as we are excluding Prometheus, so Helm cannot determine the URL from the Prometheus subchart
install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
@ -420,7 +410,6 @@ install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set open-match-core.redis.enabled=false \
--set open-match-telemetry.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set-string global.telemetry.grafana.prometheusServer="http://$(OPEN_MATCH_HELM_NAME)-prometheus-server.$(OPEN_MATCH_KUBERNETES_NAMESPACE).svc.cluster.local:80/" \
install/helm/open-match > install/yaml/04-grafana-chart.yaml
install/yaml/05-jaeger-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@ -467,7 +456,7 @@ set-redis-password:
read REDIS_PASSWORD; \
stty echo; \
printf "\n"; \
$(KUBECTL) create secret generic open-match-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
$(KUBECTL) create secret generic om-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
install-toolchain: install-kubernetes-tools install-protoc-tools install-openmatch-tools
install-kubernetes-tools: build/toolchain/bin/kubectl$(EXE_EXTENSION) build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/minikube$(EXE_EXTENSION) build/toolchain/bin/terraform$(EXE_EXTENSION)
@ -549,13 +538,13 @@ build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
build/toolchain/bin/certgen$(EXE_EXTENSION):
build/toolchain/bin/certgen$(EXE_EXTENSION): tools/certgen/certgen$(EXE_EXTENSION)
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build $(REPOSITORY_ROOT)/tools/certgen/
cp -f $(REPOSITORY_ROOT)/tools/certgen/certgen$(EXE_EXTENSION) $(CERTGEN)
build/toolchain/bin/reaper$(EXE_EXTENSION):
build/toolchain/bin/reaper$(EXE_EXTENSION): tools/reaper/reaper$(EXE_EXTENSION)
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build $(REPOSITORY_ROOT)/tools/reaper/
cp -f $(REPOSITORY_ROOT)/tools/reaper/reaper$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/reaper$(EXE_EXTENSION)
# Fake target for docker
docker: no-sudo
@ -605,10 +594,7 @@ get-kind-kubeconfig: build/toolchain/bin/kind$(EXE_EXTENSION)
delete-kind-cluster: build/toolchain/bin/kind$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
-$(KIND) delete cluster
create-cluster-role-binding:
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
create-gke-cluster: GKE_VERSION = 1.15.12-gke.20 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_VERSION = 1.14.8-gke.17 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-4 --enable-autoscaling --min-nodes 1 --num-nodes 2 --max-nodes 10 --disk-size 50
create-gke-cluster: GKE_FUTURE_COMPAT_FLAGS = --no-enable-basic-auth --no-issue-client-certificate --enable-ip-alias --metadata disable-legacy-endpoints=true --enable-autoupgrade
create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
@ -617,8 +603,7 @@ create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
--cluster-version $(GKE_VERSION) \
--image-type cos_containerd \
--tags open-match
$(MAKE) create-cluster-role-binding
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
delete-gke-cluster: gcloud
-$(GCLOUD) $(GCP_PROJECT_FLAG) container clusters delete $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GCLOUD_EXTRA_FLAGS)
@ -668,11 +653,16 @@ api/api.md: third_party/ build/toolchain/bin/protoc-gen-doc$(EXE_EXTENSION)
$(PROTOC) api/*.proto \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--doc_out=. \
--doc_opt=markdown,api_temp.md
--doc_opt=markdown,api.md
# Crazy hack that inserts a hugo link reference into this API doc :-)
cat ./docs/hugo_apiheader.txt ./api_temp.md >> api.md
mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
rm ./api_temp.md
$(SED_REPLACE) '1 i\---\
title: "Open Match API References" \
linkTitle: "Open Match API References" \
weight: 2 \
description: \
This document provides API references for Open Match services. \
--- \
' ./api.md && mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
# Include structure of the protos needs to be called out so the dependency chain is run through properly.
pkg/pb/backend.pb.go: pkg/pb/messages.pb.go
@ -686,28 +676,9 @@ build: assets
$(GO) build ./...
$(GO) build -tags e2ecluster ./...
define test_folder
$(if $(wildcard $(1)/go.mod), \
cd $(1) && \
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -race ./... && \
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -run IgnoreRace$$ ./... \
)
$(foreach dir, $(wildcard $(1)/*/.), $(call test_folder, $(dir)))
endef
define fast_test_folder
$(if $(wildcard $(1)/go.mod), \
cd $(1) && \
$(GO) test ./... \
)
$(foreach dir, $(wildcard $(1)/*/.), $(call fast_test_folder, $(dir)))
endef
test: $(ALL_PROTOS) tls-certs third_party/
$(call test_folder,.)
fasttest: $(ALL_PROTOS) tls-certs third_party/
$(call fast_test_folder,.)
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -race ./...
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -run IgnoreRace$$ ./...
test-e2e-cluster: all-protos tls-certs third_party/
$(HELM) test --timeout 7m30s -v 0 --logs -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(OPEN_MATCH_HELM_NAME)
@ -752,6 +723,58 @@ build/cmd/demo-%/COPY_PHONY:
mkdir -p $(BUILD_DIR)/cmd/demo-$*/
cp -r examples/demo/static $(BUILD_DIR)/cmd/demo-$*/static
all: service-binaries example-binaries tools-binaries
service-binaries: cmd/minimatch/minimatch$(EXE_EXTENSION) cmd/swaggerui/swaggerui$(EXE_EXTENSION)
service-binaries: cmd/backend/backend$(EXE_EXTENSION) cmd/frontend/frontend$(EXE_EXTENSION)
service-binaries: cmd/query/query$(EXE_EXTENSION) cmd/synchronizer/synchronizer$(EXE_EXTENSION)
example-binaries: example-mmf-binaries example-evaluator-binaries
example-mmf-binaries: examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION)
example-evaluator-binaries: test/evaluator/evaluator$(EXE_EXTENSION)
examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cd $(REPOSITORY_ROOT)/examples/functions/golang/soloduel; $(GO_BUILD_COMMAND)
test/matchfunction/matchfunction$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cd $(REPOSITORY_ROOT)/test/matchfunction; $(GO_BUILD_COMMAND)
test/evaluator/evaluator$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
cd $(REPOSITORY_ROOT)/test/evaluator; $(GO_BUILD_COMMAND)
tools-binaries: tools/certgen/certgen$(EXE_EXTENSION) tools/reaper/reaper$(EXE_EXTENSION)
cmd/backend/backend$(EXE_EXTENSION): pkg/pb/backend.pb.go pkg/pb/backend.pb.gw.go api/backend.swagger.json
cd $(REPOSITORY_ROOT)/cmd/backend; $(GO_BUILD_COMMAND)
cmd/frontend/frontend$(EXE_EXTENSION): pkg/pb/frontend.pb.go pkg/pb/frontend.pb.gw.go api/frontend.swagger.json
cd $(REPOSITORY_ROOT)/cmd/frontend; $(GO_BUILD_COMMAND)
cmd/query/query$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json
cd $(REPOSITORY_ROOT)/cmd/query; $(GO_BUILD_COMMAND)
cmd/synchronizer/synchronizer$(EXE_EXTENSION): internal/ipb/synchronizer.pb.go
cd $(REPOSITORY_ROOT)/cmd/synchronizer; $(GO_BUILD_COMMAND)
# Note: This list of dependencies is long but only add file references here. If you add a .PHONY dependency make will always rebuild it.
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/backend.pb.go pkg/pb/backend.pb.gw.go api/backend.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/frontend.pb.go pkg/pb/frontend.pb.gw.go api/frontend.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/messages.pb.go
cmd/minimatch/minimatch$(EXE_EXTENSION): internal/ipb/synchronizer.pb.go
cd $(REPOSITORY_ROOT)/cmd/minimatch; $(GO_BUILD_COMMAND)
cmd/swaggerui/swaggerui$(EXE_EXTENSION): third_party/swaggerui/
cd $(REPOSITORY_ROOT)/cmd/swaggerui; $(GO_BUILD_COMMAND)
tools/certgen/certgen$(EXE_EXTENSION):
cd $(REPOSITORY_ROOT)/tools/certgen/ && $(GO_BUILD_COMMAND)
tools/reaper/reaper$(EXE_EXTENSION):
cd $(REPOSITORY_ROOT)/tools/reaper/ && $(GO_BUILD_COMMAND)
build/policies/binauthz.yaml: install/policies/binauthz.yaml
mkdir -p $(BUILD_DIR)/policies
cp -f $(REPOSITORY_ROOT)/install/policies/binauthz.yaml $(BUILD_DIR)/policies/binauthz.yaml
@ -808,7 +831,7 @@ ci-reap-namespaces: build/toolchain/bin/reaper$(EXE_EXTENSION)
# For presubmit we want to update the protobuf generated files and verify that tests are good.
presubmit: GOLANG_TEST_COUNT = 5
presubmit: clean third_party/ update-chart-deps assets update-deps lint build test md-test terraform-test
presubmit: clean third_party/ update-chart-deps assets update-deps lint build install-toolchain test md-test terraform-test
build/release/: presubmit clean-install-yaml install/yaml/
mkdir -p $(BUILD_DIR)/release/
@ -842,6 +865,19 @@ clean-protos:
rm -rf $(REPOSITORY_ROOT)/pkg/pb/
rm -rf $(REPOSITORY_ROOT)/internal/ipb/
clean-binaries:
rm -rf $(REPOSITORY_ROOT)/cmd/backend/backend$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/synchronizer/synchronizer$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/frontend/frontend$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/query/query$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/minimatch/minimatch$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/test/matchfunction/matchfunction$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/test/evaluator/evaluator$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/swaggerui/swaggerui$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/tools/certgen/certgen$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/tools/reaper/reaper$(EXE_EXTENSION)
clean-terraform:
rm -rf $(REPOSITORY_ROOT)/install/terraform/.terraform/
@ -866,7 +902,7 @@ clean-swagger-docs:
clean-third-party:
rm -rf $(REPOSITORY_ROOT)/third_party/
clean: clean-images clean-build clean-install-yaml clean-secrets clean-terraform clean-third-party clean-protos clean-swagger-docs
clean: clean-images clean-binaries clean-build clean-install-yaml clean-secrets clean-terraform clean-third-party clean-protos clean-swagger-docs
proxy-frontend: build/toolchain/bin/kubectl$(EXE_EXTENSION)
@echo "Frontend Health: http://localhost:$(FRONTEND_PORT)/healthz"
@ -933,18 +969,18 @@ third_party/google/api:
mkdir -p $(TOOLCHAIN_DIR)/googleapis-temp/
mkdir -p $(REPOSITORY_ROOT)/third_party/google/api
mkdir -p $(REPOSITORY_ROOT)/third_party/google/rpc
curl -o $(TOOLCHAIN_DIR)/googleapis-temp/googleapis.zip -L https://github.com/googleapis/googleapis/archive/$(GOOGLE_APIS_VERSION).zip
curl -o $(TOOLCHAIN_DIR)/googleapis-temp/googleapis.zip -L https://github.com/googleapis/googleapis/archive/master.zip
(cd $(TOOLCHAIN_DIR)/googleapis-temp/; unzip -q -o googleapis.zip)
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/api/*.proto $(REPOSITORY_ROOT)/third_party/google/api/
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-master/google/api/*.proto $(REPOSITORY_ROOT)/third_party/google/api/
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-master/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
rm -rf $(TOOLCHAIN_DIR)/googleapis-temp
third_party/protoc-gen-swagger/options:
mkdir -p $(TOOLCHAIN_DIR)/grpc-gateway-temp/
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/v$(GRPC_GATEWAY_VERSION).zip
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/master.zip
(cd $(TOOLCHAIN_DIR)/grpc-gateway-temp/; unzip -q -o grpc-gateway.zip)
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-master/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
rm -rf $(TOOLCHAIN_DIR)/grpc-gateway-temp
third_party/swaggerui/:

View File

@ -24,6 +24,10 @@ The [Open Match Development guide](docs/development.md) has detailed instruction
on getting the source code, making changes, testing and submitting a pull request
to Open Match.
## Disclaimer
This software is currently alpha, and subject to change.
## Support
* [Slack Channel](https://open-match.slack.com/) ([Signup](https://join.slack.com/t/open-match/shared_invite/enQtNDM1NjcxNTY4MTgzLTM5ZWQxNjc1YWI3MzJmN2RiMWJmYWI0ZjFiNzNkZmNkMWQ3YWU5OGVkNzA5Yzc4OGVkOGU5MTc0OTA5ZTA5NDU))

View File

@ -88,12 +88,7 @@ message ReleaseTicketsRequest{
message ReleaseTicketsResponse {}
message ReleaseAllTicketsRequest{}
message ReleaseAllTicketsResponse {}
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
message AssignmentGroup{
message AssignTicketsRequest {
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
repeated string ticket_ids = 1;
@ -101,34 +96,13 @@ message AssignmentGroup{
Assignment assignment = 2;
}
// AssignmentFailure contains the id of the Ticket that failed the Assignment and the failure status.
message AssignmentFailure {
enum Cause {
UNKNOWN = 0;
TICKET_NOT_FOUND = 1;
}
string ticket_id = 1;
Cause cause = 2;
}
message AssignTicketsRequest {
// Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied.
repeated AssignmentGroup assignments = 1;
}
message AssignTicketsResponse {
// Failures is a list of all the Tickets that failed assignment along with the cause of failure.
repeated AssignmentFailure failures = 1;
}
message AssignTicketsResponse {}
// The BackendService implements APIs to generate matches and handle ticket assignments.
service BackendService {
// FetchMatches triggers a MatchFunction with the specified MatchProfile and
// returns a set of matches generated by the Match Making Function, and
// accepted by the evaluator.
// Tickets in matches returned by FetchMatches are moved from active to
// pending, and will not be returned by query.
// FetchMatches triggers a MatchFunction with the specified MatchProfile and returns a set of match proposals that
// match the description of that MatchProfile.
// FetchMatches immediately returns an error if it encounters any execution failures.
rpc FetchMatches(FetchMatchesRequest) returns (stream FetchMatchesResponse) {
option (google.api.http) = {
post: "/v1/backendservice/matches:fetch"
@ -144,8 +118,9 @@ service BackendService {
};
}
// ReleaseTickets moves tickets from the pending state, to the active state.
// This enables them to be returned by query, and find different matches.
// ReleaseTickets removes the submitted tickets from the list that prevents tickets
// that are awaiting assignment from appearing in MMF queries, effectively putting them back into
// the matchmaking pool
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
@ -155,17 +130,4 @@ service BackendService {
body: "*"
};
}
// ReleaseAllTickets moves all tickets from the pending state, to the active
// state. This enables them to be returned by query, and find different
// matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseAllTickets(ReleaseAllTicketsRequest) returns (ReleaseAllTicketsResponse) {
option (google.api.http) = {
post: "/v1/backendservice/tickets:releaseall"
body: "*"
};
}
}

View File

@ -26,7 +26,7 @@
"paths": {
"/v1/backendservice/matches:fetch": {
"post": {
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and\nreturns a set of matches generated by the Match Making Function, and\naccepted by the evaluator.\nTickets in matches returned by FetchMatches are moved from active to\npending, and will not be returned by query.",
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and returns a set of match proposals that \nmatch the description of that MatchProfile.\nFetchMatches immediately returns an error if it encounters any execution failures.",
"operationId": "FetchMatches",
"responses": {
"200": {
@ -94,7 +94,7 @@
},
"/v1/backendservice/tickets:release": {
"post": {
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.",
"summary": "ReleaseTickets removes the submitted tickets from the list that prevents tickets \nthat are awaiting assignment from appearing in MMF queries, effectively putting them back into\nthe matchmaking pool",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseTickets",
"responses": {
@ -126,75 +126,27 @@
"BackendService"
]
}
},
"/v1/backendservice/tickets:releaseall": {
"post": {
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseAllTickets",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchReleaseAllTicketsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchReleaseAllTicketsRequest"
}
}
],
"tags": [
"BackendService"
]
}
}
},
"definitions": {
"AssignmentFailureCause": {
"type": "string",
"enum": [
"UNKNOWN",
"TICKET_NOT_FOUND"
],
"default": "UNKNOWN"
},
"openmatchAssignTicketsRequest": {
"type": "object",
"properties": {
"assignments": {
"ticket_ids": {
"type": "array",
"items": {
"$ref": "#/definitions/openmatchAssignmentGroup"
"type": "string"
},
"description": "Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied."
"description": "TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment."
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment specifies game connection related information to be associated with the TicketIds."
}
}
},
"openmatchAssignTicketsResponse": {
"type": "object",
"properties": {
"failures": {
"type": "array",
"items": {
"$ref": "#/definitions/openmatchAssignmentFailure"
},
"description": "Failures is a list of all the Tickets that failed assignment along with the cause of failure."
}
}
"type": "object"
},
"openmatchAssignment": {
"type": "object",
@ -211,36 +163,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchAssignmentFailure": {
"type": "object",
"properties": {
"ticket_id": {
"type": "string"
},
"cause": {
"$ref": "#/definitions/AssignmentFailureCause"
}
},
"description": "AssignmentFailure contains the id of the Ticket that failed the Assignment and the failure status."
},
"openmatchAssignmentGroup": {
"type": "object",
"properties": {
"ticket_ids": {
"type": "array",
"items": {
"type": "string"
},
"description": "TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment."
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment specifies game connection related information to be associated with the TicketIds."
}
},
"description": "AssignmentGroup contains an Assignment and the Tickets to which it should be applied."
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
},
"openmatchDoubleRangeFilter": {
"type": "object",
@ -376,7 +299,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@ -389,25 +312,8 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchReleaseAllTicketsRequest": {
"type": "object"
},
"openmatchReleaseAllTicketsResponse": {
"type": "object"
}
},
"openmatchReleaseTicketsRequest": {
"type": "object",
@ -483,7 +389,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -495,14 +401,9 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
},
"protobufAny": {
"type": "object",

View File

@ -76,7 +76,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
},
"openmatchEvaluateRequest": {
"type": "object",
@ -165,7 +165,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -177,14 +177,9 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
},
"protobufAny": {
"type": "object",

View File

@ -20,7 +20,6 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "google/protobuf/empty.proto";
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
info: {
@ -61,22 +60,29 @@ message CreateTicketRequest {
Ticket ticket = 1;
}
message CreateTicketResponse {
// A Ticket object with TicketId generated.
Ticket ticket = 1;
}
message DeleteTicketRequest {
// A TicketId of a generated Ticket to be deleted.
string ticket_id = 1;
}
message DeleteTicketResponse {}
message GetTicketRequest {
// A TicketId of a generated Ticket.
string ticket_id = 1;
}
message WatchAssignmentsRequest {
message GetAssignmentsRequest {
// A TicketId of a generated Ticket to get updates on.
string ticket_id = 1;
}
message WatchAssignmentsResponse {
message GetAssignmentsResponse {
// An updated Assignment of the requested Ticket.
Assignment assignment = 1;
}
@ -87,7 +93,7 @@ service FrontendService {
// A ticket is considered as ready for matchmaking once it is created.
// - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
// - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.
rpc CreateTicket(CreateTicketRequest) returns (Ticket) {
rpc CreateTicket(CreateTicketRequest) returns (CreateTicketResponse) {
option (google.api.http) = {
post: "/v1/frontendservice/tickets"
body: "*"
@ -95,8 +101,10 @@ service FrontendService {
}
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client should delete the Ticket when finished matchmaking with it.
rpc DeleteTicket(DeleteTicketRequest) returns (google.protobuf.Empty) {
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
rpc DeleteTicket(DeleteTicketRequest) returns (DeleteTicketResponse) {
option (google.api.http) = {
delete: "/v1/frontendservice/tickets/{ticket_id}"
};
@ -109,10 +117,10 @@ service FrontendService {
};
}
// WatchAssignments stream back Assignment of the specified TicketId if it is updated.
// GetAssignments stream back Assignment of the specified TicketId if it is updated.
// - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
rpc WatchAssignments(WatchAssignmentsRequest)
returns (stream WatchAssignmentsResponse) {
rpc GetAssignments(GetAssignmentsRequest)
returns (stream GetAssignmentsResponse) {
option (google.api.http) = {
get: "/v1/frontendservice/tickets/{ticket_id}/assignments"
};

View File

@ -32,7 +32,7 @@
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchTicket"
"$ref": "#/definitions/openmatchCreateTicketResponse"
}
},
"404": {
@ -91,13 +91,13 @@
]
},
"delete": {
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client should delete the Ticket when finished matchmaking with it.",
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client must delete the Ticket when finished matchmaking with it. \n - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.\nUsers may still be able to assign/get a ticket after calling DeleteTicket on it.",
"operationId": "DeleteTicket",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"properties": {}
"$ref": "#/definitions/openmatchDeleteTicketResponse"
}
},
"404": {
@ -124,13 +124,13 @@
},
"/v1/frontendservice/tickets/{ticket_id}/assignments": {
"get": {
"summary": "WatchAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "WatchAssignments",
"summary": "GetAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "GetAssignments",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchWatchAssignmentsResponse"
"$ref": "#/x-stream-definitions/openmatchGetAssignmentsResponse"
}
},
"404": {
@ -172,7 +172,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
},
"openmatchCreateTicketRequest": {
"type": "object",
@ -183,6 +183,27 @@
}
}
},
"openmatchCreateTicketResponse": {
"type": "object",
"properties": {
"ticket": {
"$ref": "#/definitions/openmatchTicket",
"description": "A Ticket object with TicketId generated."
}
}
},
"openmatchDeleteTicketResponse": {
"type": "object"
},
"openmatchGetAssignmentsResponse": {
"type": "object",
"properties": {
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Ticket."
}
}
},
"openmatchSearchFields": {
"type": "object",
"properties": {
@ -220,7 +241,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -232,23 +253,9 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Ticket."
}
}
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
},
"protobufAny": {
"type": "object",
@ -292,17 +299,17 @@
}
},
"x-stream-definitions": {
"openmatchWatchAssignmentsResponse": {
"openmatchGetAssignmentsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
"$ref": "#/definitions/openmatchGetAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchWatchAssignmentsResponse"
"title": "Stream result of openmatchGetAssignmentsResponse"
}
},
"externalDocs": {

View File

@ -69,7 +69,7 @@ message RunResponse {
// The MatchFunction service implements APIs to run user-defined matchmaking logics.
service MatchFunction {
// DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.
// Run pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then
// Run pulls Tickets that satisify Profile constraints from QueryService, runs matchmaking logics against them, then
// constructs and streams back match candidates to the Backend service.
rpc Run(RunRequest) returns (stream RunResponse) {
option (google.api.http) = {

View File

@ -26,7 +26,7 @@
"paths": {
"/v1/matchfunction:run": {
"post": {
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then\nconstructs and streams back match candidates to the Backend service.",
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisify Profile constraints from QueryService, runs matchmaking logics against them, then\nconstructs and streams back match candidates to the Backend service.",
"operationId": "Run",
"responses": {
"200": {
@ -75,7 +75,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
},
"openmatchDoubleRangeFilter": {
"type": "object",
@ -165,7 +165,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@ -178,19 +178,8 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
}
},
"openmatchRunRequest": {
"type": "object",
@ -269,7 +258,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -281,14 +270,9 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
},
"protobufAny": {
"type": "object",

View File

@ -19,20 +19,17 @@ option csharp_namespace = "OpenMatch";
import "google/rpc/status.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
// A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent
// an individual 'Player', a 'Group' of players, or any other concepts unique to
// your use case. Open Match will not interpret what the Ticket represents but
// just treat it as a matchmaking unit with a set of SearchFields. Open Match
// stores the Ticket in state storage and enables an Assignment to be set on the
// Ticket.
// A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an
// individual 'Player' or a 'Group' of players. Open Match will not interpret
// what the Ticket represents but just treat it as a matchmaking unit with a set
// of SearchFields. Open Match stores the Ticket in state storage and enables an
// Assignment to be associated with this Ticket.
message Ticket {
// Id represents an auto-generated Id issued by Open Match.
string id = 1;
// An Assignment represents a game server assignment associated with a Ticket,
// or whatever finalized matched state means for your use case.
// An Assignment represents a game server assignment associated with a Ticket.
// Open Match does not require or inspect any fields on Assignment.
Assignment assignment = 3;
@ -45,10 +42,6 @@ message Ticket {
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 5;
// Create time is the time the Ticket was created. It is populated by Open
// Match at the time of Ticket creation.
google.protobuf.Timestamp create_time = 6;
// Deprecated fields.
reserved 2;
}
@ -66,8 +59,8 @@ message SearchFields {
repeated string tags = 3;
}
// An Assignment represents a game server assignment associated with a Ticket.
// Open Match does not require or inspect any fields on assignment.
// An Assignment represents a game server assignment associated with a Ticket. Open
// match does not require or inspect any fields on assignment.
message Assignment {
// Connection information for this Assignment.
string connection = 1;
@ -133,13 +126,11 @@ message TagPresentFilter {
string tag = 1;
}
// Pool specfies a set of criteria that are used to select a subset of Tickets
// that meet all the criteria.
message Pool {
// A developer-chosen human-readable name for this Pool.
string name = 1;
// Set of Filters indicating the filtering criteria. Selected tickets must
// Set of Filters indicating the filtering criteria. Selected players must
// match every Filter.
repeated DoubleRangeFilter double_range_filters = 2;
@ -147,12 +138,6 @@ message Pool {
repeated TagPresentFilter tag_present_filters = 5;
// If specified, only Tickets created before the specified time are selected.
google.protobuf.Timestamp created_before = 6;
// If specified, only Tickets created after the specified time are selected.
google.protobuf.Timestamp created_after = 7;
// Deprecated fields.
reserved 3;
}

View File

@ -56,46 +56,25 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
};
message QueryTicketsRequest {
// The Pool representing the set of Filters to be queried.
// A Pool is consists of a set of Filters.
Pool pool = 1;
}
message QueryTicketsResponse {
// Tickets that meet all the filtering criteria requested by the pool.
// Tickets that satisfy all the filtering criteria.
repeated Ticket tickets = 1;
}
message QueryTicketIdsRequest {
// The Pool representing the set of Filters to be queried.
Pool pool = 1;
}
message QueryTicketIdsResponse {
// TicketIDs that meet all the filtering criteria requested by the pool.
repeated string ids = 1;
}
// The QueryService service implements helper APIs for Match Function to query Tickets from state storage.
service QueryService {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `queryPageSize` and stream back responses.
// - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.
// QueryTickets pages the Tickets by `storage.pool.size` and stream back response.
// - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000
rpc QueryTickets(QueryTicketsRequest) returns (stream QueryTicketsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/tickets:query"
body: "*"
};
}
// QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
// - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
// QueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.
// - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.
rpc QueryTicketIds(QueryTicketIdsRequest) returns (stream QueryTicketIdsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/ticketids:query"
body: "*"
};
}
}

View File

@ -24,43 +24,9 @@
"application/json"
],
"paths": {
"/v1/queryservice/ticketids:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryTicketIds",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchQueryTicketIdsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchQueryTicketIdsRequest"
}
}
],
"tags": [
"QueryService"
]
}
},
"/v1/queryservice/tickets:query": {
"post": {
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `storage.pool.size` and stream back response.\n - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000",
"operationId": "QueryTickets",
"responses": {
"200": {
@ -109,7 +75,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
},
"openmatchDoubleRangeFilter": {
"type": "object",
@ -143,7 +109,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@ -156,38 +122,6 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchQueryTicketIdsRequest": {
"type": "object",
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
}
}
},
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"ids": {
"type": "array",
"items": {
"type": "string"
},
"description": "TicketIDs that meet all the filtering criteria requested by the pool."
}
}
},
@ -196,7 +130,7 @@
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
"description": "A Pool is consists of a set of Filters."
}
}
},
@ -208,7 +142,7 @@
"items": {
"$ref": "#/definitions/openmatchTicket"
},
"description": "Tickets that meet all the filtering criteria requested by the pool."
"description": "Tickets that satisfy all the filtering criteria."
}
}
},
@ -271,7 +205,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -283,14 +217,9 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
},
"protobufAny": {
"type": "object",
@ -334,18 +263,6 @@
}
},
"x-stream-definitions": {
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
},
"openmatchQueryTicketsResponse": {
"type": "object",
"properties": {

View File

@ -153,7 +153,7 @@ steps:
artifacts:
objects:
location: '${_ARTIFACTS_BUCKET}'
location: gs://open-match-build-artifacts/output/
paths:
- install/yaml/install.yaml
- install/yaml/01-open-match-core.yaml
@ -164,12 +164,10 @@ artifacts:
- install/yaml/06-open-match-override-configmap.yaml
substitutions:
_OM_VERSION: "1.1.0"
_OM_VERSION: "0.9.0"
_GCB_POST_SUBMIT: "0"
_GCB_LATEST_VERSION: "undefined"
_ARTIFACTS_BUCKET: "gs://open-match-build-artifacts/output/"
_LOGS_BUCKET: "gs://open-match-build-logs/"
logsBucket: '${_LOGS_BUCKET}'
logsBucket: 'gs://open-match-build-logs/'
options:
sourceProvenanceHash: ['SHA256']
machineType: 'N1_HIGHCPU_32'

View File

@ -16,10 +16,11 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/backend"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
)
func main() {
appmain.RunApplication("backend", backend.BindService)
app.RunApplication("backend", config.Read, backend.BindService)
}

View File

@ -16,10 +16,11 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/frontend"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
)
func main() {
appmain.RunApplication("frontend", frontend.BindService)
app.RunApplication("frontend", config.Read, frontend.BindService)
}

View File

@ -16,10 +16,11 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/minimatch"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
)
func main() {
appmain.RunApplication("minimatch", minimatch.BindService)
app.RunApplication("minimatch", config.Read, minimatch.BindService)
}

View File

@ -16,10 +16,11 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/query"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
)
func main() {
appmain.RunApplication("query", query.BindService)
app.RunApplication("query", config.Read, query.BindService)
}

View File

@ -16,9 +16,10 @@ package main
import (
"open-match.dev/open-match/examples/scale/backend"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/config"
)
func main() {
appmain.RunApplication("scale", backend.BindService)
app.RunApplication("scale", config.Read, backend.BindService)
}

View File

@ -16,9 +16,10 @@ package main
import (
"open-match.dev/open-match/examples/scale/frontend"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/config"
)
func main() {
appmain.RunApplication("scale", frontend.BindService)
app.RunApplication("scale", config.Read, frontend.BindService)
}

View File

@ -16,10 +16,11 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/synchronizer"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
)
func main() {
appmain.RunApplication("synchronizer", synchronizer.BindService)
app.RunApplication("synchronizer", config.Read, synchronizer.BindService)
}

View File

@ -9,12 +9,14 @@ To build Open Match you'll need the following applications installed.
* [Git](https://git-scm.com/downloads)
* [Go](https://golang.org/doc/install)
* [Python3 with virtualenv](https://wiki.python.org/moin/BeginnersGuide/Download)
* Make (Mac: install [XCode](https://itunes.apple.com/us/app/xcode/id497799835))
* [Docker](https://docs.docker.com/install/) including the
[post-install steps](https://docs.docker.com/install/linux/linux-postinstall/).
Optional Software
* [Google Cloud Platform](gcloud.md)
* [Visual Studio Code](https://code.visualstudio.com/Download) for IDE.
Vim and Emacs work too.
* [VirtualBox](https://www.virtualbox.org/wiki/Downloads) recommended for
@ -25,7 +27,8 @@ running:
```bash
sudo apt-get update
sudo apt-get install -y -q make google-cloud-sdk git unzip tar
sudo apt-get install -y -q python3 python3-virtualenv virtualenv make \
google-cloud-sdk git unzip tar
```
*It's recommended that you install Go using their instructions because package
@ -48,11 +51,13 @@ make
[create a fork](https://help.github.com/en/articles/fork-a-repo) and use that
but for purpose of this guide we'll be using the upstream/master.*
## Building code and images
## Building
```bash
# Reset workspace
make clean
# Compile all the binaries
make all -j$(nproc)
# Run tests
make test
# Build all the images.
@ -82,9 +87,11 @@ default context the Makefile will honor that._
# GKE cluster: make create-gke-cluster/delete-gke-cluster
# or create a local Minikube cluster
make create-gke-cluster
# Step 2: Build and Push Open Match Images to gcr.io
# Step 2: Download helm and install Tiller in the cluster
make push-helm
# Step 3: Build and Push Open Match Images to gcr.io
make push-images -j$(nproc)
# Step 3: Install Open Match in the cluster.
# Install Open Match in the cluster.
make install-chart
# Create a proxy to Open Match pods so that you can access them locally.
@ -98,29 +105,12 @@ make proxy
make delete-chart
```
## Iterating
While iterating on the project, you may need to:
1. Install/Run everything
2. Make some code changes
3. Make sure the changes compile by running `make test`
4. Build and push Docker images to your personal registry by running `make push-images -j$(nproc)`
5. Deploy the code change by running `make install-chart`
6. Verify it's working by [looking at the logs](#accessing-logs) or looking at the monitoring dashboard by running `make proxy-grafana`
7. Tear down Open Match by running `make delete-chart`
## Interaction
## Accessing logs
To look at Open Match core services' logs, run:
```bash
# Replace open-match-frontend with the service name that you would like to access
kubectl logs -n open-match svc/open-match-frontend
```
Before integrating with Open Match you can manually interact with it to get a feel for how it works.
## API References
While integrating with Open Match you may want to understand its API surface concepts or interact with it and get a feel for how it works.
The APIs are defined in `proto` format under the `api/` folder, with references available at [open-match.dev](https://open-match.dev/site/docs/reference/api/).
You can also run `make proxy-ui` to exposes the Swagger UI for Open Match locally on your computer after [deploying it to Kubernetes](#deploying-to-kubernetes), then go to http://localhost:51500 and view the REST APIs as well as interactively call Open Match.
`make proxy-ui` exposes the Swagger UI for Open Match locally on your computer.
You can then go to http://localhost:51500 and view the API as well as interactively call Open Match.
By default you will be talking to the frontend server but you can change the target API url to any of the following:
@ -154,9 +144,55 @@ export GOPATH=$HOME/workspace/
## Pull Requests
If you want to submit a Pull Request, `make presubmit` can catch most of the issues your change can run into.
If you want to submit a Pull Request there's some tools to help prepare your
change.
```bash
# Runs code generators, tests, and linters.
make presubmit
```
`make presubmit` catches most of the issues your change can run into. If the
submit checks fail you can run it locally via,
```bash
make local-cloud-build
```
Our [continuous integration](https://console.cloud.google.com/cloud-build/builds?project=open-match-build)
runs against all PRs. In order to see your build results you'll need to
become a member of
[open-match-discuss@googlegroups.com](https://groups.google.com/forum/#!forum/open-match-discuss).
## Makefile
The Makefile is the core of Open Match's build process. There's a lot of
commands but here's a list of the important ones and patterns to remember them.
```bash
# Help
make
# Reset workspace (delete all build artifacts)
make clean
# Delete auto-generated protobuf code and swagger API docs.
make clean-protos clean-swagger-docs
# make clean-* deletes some part of the build outputs.
# Build all Docker images
make build-images
# Build frontend docker image.
make build-frontend-image
# Formats, Vets, and tests the codebase.
make fmt vet test
# Same as above also regenerates autogen files.
make presubmit
# Run website on http://localhost:8080
make run-site
# Proxy all Open Match processes to view them.
make proxy
```

View File

@ -12,13 +12,24 @@ SOURCE_VERSION=$1
DEST_VERSION=$2
SOURCE_PROJECT_ID=open-match-build
DEST_PROJECT_ID=open-match-public-images
IMAGE_NAMES=$(make list-images)
IMAGE_NAMES="openmatch-backend openmatch-frontend openmatch-query openmatch-synchronizer openmatch-minimatch openmatch-demo-first-match openmatch-mmf-go-soloduel openmatch-mmf-go-pool openmatch-evaluator-go-simple openmatch-swaggerui openmatch-reaper"
for name in $IMAGE_NAMES
do
source_image=gcr.io/$SOURCE_PROJECT_ID/openmatch-$name:$SOURCE_VERSION
dest_image=gcr.io/$DEST_PROJECT_ID/openmatch-$name:$DEST_VERSION
source_image=gcr.io/$SOURCE_PROJECT_ID/$name:$SOURCE_VERSION
dest_image=gcr.io/$DEST_PROJECT_ID/$name:$DEST_VERSION
docker pull $source_image
docker tag $source_image $dest_image
docker push $dest_image
done
echo "=============================================================="
echo "=============================================================="
echo "=============================================================="
echo "=============================================================="
echo "Add these lines to your release notes:"
for name in $IMAGE_NAMES
do
echo "docker pull gcr.io/$DEST_PROJECT_ID/$name:$DEST_VERSION"
done

View File

@ -1,7 +0,0 @@
---
title: "Open Match API References"
linkTitle: "Open Match API References"
weight: 2
description:
This document provides API references for Open Match services.
---

View File

@ -81,7 +81,7 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
update(s)
// See https://open-match.dev/site/docs/guides/api/
conn, err := grpc.Dial("open-match-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
conn, err := grpc.Dial("om-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
if err != nil {
panic(err)
}
@ -102,7 +102,7 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
if err != nil {
panic(err)
}
ticketId = resp.Id
ticketId = resp.Ticket.Id
}
//////////////////////////////////////////////////////////////////////////////
@ -111,11 +111,11 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
var assignment *pb.Assignment
{
req := &pb.WatchAssignmentsRequest{
req := &pb.GetAssignmentsRequest{
TicketId: ticketId,
}
stream, err := fe.WatchAssignments(ctx, req)
stream, err := fe.GetAssignments(ctx, req)
for assignment.GetConnection() == "" {
resp, err := stream.Recv()
if err != nil {

View File

@ -68,7 +68,7 @@ func run(ds *components.DemoShared) {
ds.Update(s)
// See https://open-match.dev/site/docs/guides/api/
conn, err := grpc.Dial("open-match-backend.open-match.svc.cluster.local:50505", grpc.WithInsecure())
conn, err := grpc.Dial("om-backend.open-match.svc.cluster.local:50505", grpc.WithInsecure())
if err != nil {
panic(err)
}
@ -131,13 +131,9 @@ func run(ds *components.DemoShared) {
}
req := &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
},
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
}

View File

@ -24,8 +24,8 @@ import (
)
const (
queryServiceAddr = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
queryServiceAddr = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
)
func main() {

View File

@ -19,11 +19,11 @@ import (
"open-match.dev/open-match/pkg/pb"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
)
func TestMakeMatchesDeduplicate(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
poolNameToTickets := map[string][]*pb.Ticket{
"pool1": {{Id: "1"}},
@ -31,12 +31,12 @@ func TestMakeMatchesDeduplicate(t *testing.T) {
}
matches, err := makeMatches(poolNameToTickets)
require.Nil(err)
require.Equal(len(matches), 0)
assert.Nil(err)
assert.Equal(len(matches), 0)
}
func TestMakeMatches(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
poolNameToTickets := map[string][]*pb.Ticket{
"pool1": {{Id: "1"}, {Id: "2"}, {Id: "3"}},
@ -45,11 +45,11 @@ func TestMakeMatches(t *testing.T) {
}
matches, err := makeMatches(poolNameToTickets)
require.Nil(err)
require.Equal(len(matches), 3)
assert.Nil(err)
assert.Equal(len(matches), 3)
for _, match := range matches {
require.Equal(2, len(match.Tickets))
require.Equal(matchName, match.MatchFunction)
assert.Equal(2, len(match.Tickets))
assert.Equal(matchName, match.MatchFunction)
}
}

View File

@ -1,20 +0,0 @@
## How to use this framework
This is the framework that we use to benchmark Open Match against different matchmaking scenarios. For now (02/24/2020), this framework supports a Battle Royale, a Basic 1v1 matchmaking, and a Team Shooter scenario. You are welcome to write up your own `Scenario`, test it, and share the numbers you are able to get with us.
1. The `Scenario` struct under the `scenarios/scenarios.go` file defines the parameters that this framework currently support/plan to support.
2. Each subpackage `battleroyal`, `firstmatch`, and `teamshooter` implements the `GameScenario` interface defined in the `scenarios/scenarios.go` file. Feel free to write your own benchmark scenario by implementing the interface.
- Ticket `func() *pb.Ticket` - Tickets generator
- Profiles `func() []*pb.MatchProfile` - Profiles generator
- MMF `MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)` - Custom matchmaking logic using a MatchProfile and a map struct that contains the mapping from pool name to the tickets of that pool.
- Evaluate `Evaluate(stream pb.Evaluator_EvaluateServer) error` - Custom logic implementation of the evaluator.
Follow the instructions below if you want to use any of the existing benchmarking scenarios.
1. Open the `scenarios.go` file under the scenarios directory.
2. Change the value of the `ActiveScenario` variable to the scenario that you would like Open Match to run against.
3. Make sure you have `kubectl` connected to an existing Kubernetes cluster and run `make push-images` followed by `make install-scale-chart` to push the images and install Open Match core along with the scale components in the cluster.
4. Run `make proxy`
- Open `localhost:3000` to see the Grafana dashboards.
- Open `localhost:9090` to see the Prometheus query server.
- Open `localhost:[COMPONENT_HTTP_ENDPOINT]/help` to see how to access the zpages.

View File

@ -25,7 +25,6 @@ import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"open-match.dev/open-match/examples/scale/scenarios"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/telemetry"
@ -39,6 +38,7 @@ var (
})
activeScenario = scenarios.ActiveScenario
statProcessor = scenarios.NewStatProcessor()
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
@ -54,8 +54,8 @@ var (
// Run triggers execution of functions that continuously fetch, assign and
// delete matches.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
go run(p.Config())
func BindService(p *rpc.ServerParams, cfg config.View) error {
go run(cfg)
return nil
}
@ -76,6 +76,8 @@ func run(cfg config.View) {
defer feConn.Close()
fe := pb.NewFrontendServiceClient(feConn)
startTime := time.Now()
w := logger.Writer()
defer w.Close()
@ -92,6 +94,7 @@ func run(cfg config.View) {
for range time.Tick(time.Millisecond * 250) {
// Keep pulling matches from Open Match backend
profiles := activeScenario.Profiles()
statProcessor.SetStat("TotalProfiles", len(profiles))
var wg sync.WaitGroup
for _, p := range profiles {
@ -104,7 +107,9 @@ func run(cfg config.View) {
// Wait for all profiles to complete before proceeding.
wg.Wait()
statProcessor.SetStat("TimeElapsed", time.Since(startTime).String())
telemetry.RecordUnitMeasurement(context.Background(), mIterations)
statProcessor.Log(w)
}
}
@ -125,7 +130,7 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA
stream, err := be.FetchMatches(ctx, req)
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mFetchMatchErrors)
logger.WithError(err).Error("failed to get available stream client")
statProcessor.RecordError("failed to get available stream client", err)
return
}
@ -139,12 +144,13 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mFetchMatchErrors)
logger.WithError(err).Error("failed to get matches from stream client")
statProcessor.RecordError("failed to get matches from stream client", err)
return
}
telemetry.RecordNUnitMeasurement(ctx, mSumTicketsReturned, int64(len(resp.GetMatch().Tickets)))
telemetry.RecordUnitMeasurement(ctx, mMatchesReturned)
statProcessor.IncrementStat("MatchCount", 1)
matchesForAssignment <- resp.GetMatch()
}
@ -161,22 +167,19 @@ func runAssignments(be pb.BackendServiceClient, matchesForAssignment <-chan *pb.
if activeScenario.BackendAssignsTickets {
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
},
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
logger.WithError(err).Error("failed to assign tickets")
statProcessor.RecordError("failed to assign tickets", err)
continue
}
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
statProcessor.IncrementStat("Assigned", len(ids))
}
for _, id := range ids {
@ -198,9 +201,10 @@ func runDeletions(fe pb.FrontendServiceClient, ticketsForDeletion <-chan string)
if err == nil {
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
statProcessor.IncrementStat("Deleted", 1)
} else {
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
logger.WithError(err).Error("failed to delete tickets")
statProcessor.RecordError("failed to delete tickets", err)
}
}
}

View File

@ -16,15 +16,13 @@ package frontend
import (
"context"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"open-match.dev/open-match/examples/scale/scenarios"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/telemetry"
@ -36,18 +34,20 @@ var (
"app": "openmatch",
"component": "scale.frontend",
})
activeScenario = scenarios.ActiveScenario
activeScenario = scenarios.ActiveScenario
statProcessor = scenarios.NewStatProcessor()
numOfRoutineCreate = 8
totalCreated uint32
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "tickets created")
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
)
// Run triggers execution of the scale frontend component that creates
// tickets at scale in Open Match.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
go run(p.Config())
func BindService(p *rpc.ServerParams, cfg config.View) error {
go run(cfg)
return nil
}
@ -61,92 +61,75 @@ func run(cfg config.View) {
}
fe := pb.NewFrontendServiceClient(conn)
w := logger.Writer()
defer w.Close()
ticketQPS := int(activeScenario.FrontendTicketCreatedQPS)
ticketTotal := activeScenario.FrontendTotalTicketsToCreate
totalCreated := 0
for {
currentCreated := int(atomic.LoadUint32(&totalCreated))
if ticketTotal != -1 && currentCreated >= ticketTotal {
break
}
for range time.Tick(time.Second) {
for i := 0; i < ticketQPS; i++ {
if ticketTotal == -1 || totalCreated < ticketTotal {
go runner(fe)
// Each inner loop creates TicketCreatedQPS tickets
var ticketPerRoutine, ticketModRoutine int
start := time.Now()
if ticketTotal == -1 || currentCreated+ticketQPS <= ticketTotal {
ticketPerRoutine = ticketQPS / numOfRoutineCreate
ticketModRoutine = ticketQPS % numOfRoutineCreate
} else {
ticketPerRoutine = (ticketTotal - currentCreated) / numOfRoutineCreate
ticketModRoutine = (ticketTotal - currentCreated) % numOfRoutineCreate
}
var wg sync.WaitGroup
for i := 0; i < numOfRoutineCreate; i++ {
wg.Add(1)
if i < ticketModRoutine {
go createPerCycle(&wg, fe, ticketPerRoutine+1, start)
} else {
go createPerCycle(&wg, fe, ticketPerRoutine, start)
}
}
// Wait for all concurrent creates to complete.
wg.Wait()
statProcessor.SetStat("TotalCreated", atomic.LoadUint32(&totalCreated))
statProcessor.Log(w)
}
}
func runner(fe pb.FrontendServiceClient) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
func createPerCycle(wg *sync.WaitGroup, fe pb.FrontendServiceClient, ticketPerRoutine int, start time.Time) {
defer wg.Done()
cycleCreated := 0
g := stateGauge{}
defer g.stop()
for j := 0; j < ticketPerRoutine; j++ {
req := &pb.CreateTicketRequest{
Ticket: activeScenario.Ticket(),
}
g.start(mRunnersWaiting)
// A random sleep at the start of the worker evens calls out over the second
// period, and makes timing between ticket creation calls a more realistic
// poisson distribution.
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/CreateTicket")
defer span.End()
g.start(mRunnersCreating)
id, err := createTicket(ctx, fe)
if err != nil {
logger.WithError(err).Error("failed to create a ticket")
return
timeLeft := start.Add(time.Second).Sub(time.Now())
if timeLeft <= 0 {
break
}
ticketsLeft := ticketPerRoutine - cycleCreated
time.Sleep(timeLeft / time.Duration(ticketsLeft))
if _, err := fe.CreateTicket(ctx, req); err == nil {
cycleCreated++
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
} else {
statProcessor.RecordError("failed to create a ticket", err)
telemetry.RecordUnitMeasurement(ctx, mTicketCreationsFailed)
}
}
_ = id
}
func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, error) {
ctx, span := trace.StartSpan(ctx, "scale.frontend/CreateTicket")
defer span.End()
req := &pb.CreateTicketRequest{
Ticket: activeScenario.Ticket(),
}
resp, err := fe.CreateTicket(ctx, req)
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mTicketCreationsFailed)
return "", err
}
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
return resp.Id, nil
}
// Allows concurrent modification of a gauge value by modifying the concurrent
// value with a delta.
func concurrentGauge(s *stats.Int64Measure) func(delta int64) {
m := sync.Mutex{}
v := int64(0)
return func(delta int64) {
m.Lock()
defer m.Unlock()
v += delta
telemetry.SetGauge(context.Background(), s, v)
}
}
// stateGauge will have a single value be applied to one gauge at a time.
type stateGauge struct {
f func(int64)
}
// start begins a stage measured in a gauge, stopping any previously started
// stage.
func (g *stateGauge) start(f func(int64)) {
g.stop()
g.f = f
f(1)
}
// stop finishes the current stage by decrementing the gauge.
func (g *stateGauge) stop() {
if g.f != nil {
g.f(-1)
g.f = nil
}
atomic.AddUint32(&totalCreated, uint32(cycleCreated))
}

View File

@ -39,7 +39,7 @@ var (
func Run() {
activeScenario := scenarios.ActiveScenario
conn, err := grpc.Dial("open-match-query.open-match.svc.cluster.local:50503", utilTesting.NewGRPCDialOptions(logger)...)
conn, err := grpc.Dial("om-query.open-match.svc.cluster.local:50503", utilTesting.NewGRPCDialOptions(logger)...)
if err != nil {
logger.Fatalf("Failed to connect to Open Match, got %v", err)
}

View File

@ -0,0 +1,85 @@
package scenarios
import (
"fmt"
"math/rand"
"time"
"open-match.dev/open-match/pkg/pb"
)
// Parameters for the battle royale benchmark scenario.
const (
// battleRoyalRegions is the number of distinct region values a ticket may carry.
battleRoyalRegions = 20
// regionArg is the SearchFields string key used to filter tickets by region.
regionArg = "region"
)
var (
// battleRoyalScenario wires the battle royale ticket/profile generators and
// matchmaking logic into the benchmark framework's Scenario configuration.
battleRoyalScenario = &Scenario{
MMF: queryPoolsWrapper(battleRoyalMmf),
Evaluator: fifoEvaluate,
// -1 means the frontend keeps creating tickets with no total cap.
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
Ticket: battleRoyalTicket,
Profiles: battleRoyalProfile,
}
)
// battleRoyalProfile returns one MatchProfile per battle royale region.
// Each profile holds a single pool that selects only tickets whose region
// string argument equals that profile's region name.
func battleRoyalProfile() []*pb.MatchProfile {
	profiles := make([]*pb.MatchProfile, 0, battleRoyalRegions)
	for region := 0; region < battleRoyalRegions; region++ {
		name := battleRoyalRegionName(region)
		pool := &pb.Pool{
			Name: poolName,
			StringEqualsFilters: []*pb.StringEqualsFilter{
				{
					StringArg: regionArg,
					Value:     name,
				},
			},
		}
		profiles = append(profiles, &pb.MatchProfile{
			Name:  name,
			Pools: []*pb.Pool{pool},
		})
	}
	return profiles
}
// battleRoyalTicket generates a ticket placed in a random region.
// The region population is deliberately uneven: the region index is drawn
// uniformly from [0, upper) where upper is itself uniform in
// [1, battleRoyalRegions], biasing tickets toward lower-numbered regions.
func battleRoyalTicket() *pb.Ticket {
	upper := rand.Intn(battleRoyalRegions) + 1
	region := rand.Intn(upper)
	return &pb.Ticket{
		SearchFields: &pb.SearchFields{
			StringArgs: map[string]string{
				regionArg: battleRoyalRegionName(region),
			},
		},
	}
}
// battleRoyalMmf slices the profile's pool into fixed-size matches of 100
// tickets each. Any trailing remainder smaller than a full match is left
// unmatched for a later iteration.
func battleRoyalMmf(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
	const playersInMatch = 100

	tickets := poolTickets[poolName]
	var matches []*pb.Match

	for start := 0; start+playersInMatch <= len(tickets); start += playersInMatch {
		end := start + playersInMatch
		// Match ID combines profile name, wall-clock time, and a per-call
		// sequence number (current length of matches).
		matchID := fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches))
		matches = append(matches, &pb.Match{
			MatchId:       matchID,
			Tickets:       tickets[start:end],
			MatchProfile:  p.GetName(),
			MatchFunction: "battleRoyal",
		})
	}
	return matches, nil
}
// battleRoyalRegionName maps a region index to its canonical name, e.g.
// 3 -> "region_3".
func battleRoyalRegionName(i int) string {
	// fmt.Sprint inserts no space here because the first operand is a string.
	return fmt.Sprint("region_", i)
}

View File

@ -1,141 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package battleroyal
import (
"fmt"
"io"
"math/rand"
"time"
"open-match.dev/open-match/pkg/pb"
)
const (
poolName = "all"
regionArg = "region"
)
func battleRoyalRegionName(i int) string {
return fmt.Sprintf("region_%d", i)
}
func Scenario() *BattleRoyalScenario {
return &BattleRoyalScenario{
regions: 20,
}
}
type BattleRoyalScenario struct {
regions int
}
func (b *BattleRoyalScenario) Profiles() []*pb.MatchProfile {
p := []*pb.MatchProfile{}
for i := 0; i < b.regions; i++ {
p = append(p, &pb.MatchProfile{
Name: battleRoyalRegionName(i),
Pools: []*pb.Pool{
{
Name: poolName,
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: regionArg,
Value: battleRoyalRegionName(i),
},
},
},
},
})
}
return p
}
func (b *BattleRoyalScenario) Ticket() *pb.Ticket {
// Simple way to give an uneven distribution of region population.
a := rand.Intn(b.regions) + 1
r := rand.Intn(a)
return &pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
regionArg: battleRoyalRegionName(r),
},
},
}
}
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
const playersInMatch = 100
tickets := poolTickets[poolName]
var matches []*pb.Match
for i := 0; i+playersInMatch <= len(tickets); i += playersInMatch {
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[i : i+playersInMatch],
MatchProfile: p.GetName(),
MatchFunction: "battleRoyal",
})
}
return matches, nil
}
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
// previously accepted match. Essentially first to claim the ticket wins.
func (b *BattleRoyalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and receiving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}
outer:
for {
req, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("Error reading evaluator input stream: %w", err)
}
m := req.GetMatch()
for _, t := range m.Tickets {
if _, ok := used[t.Id]; ok {
continue outer
}
}
for _, t := range m.Tickets {
used[t.Id] = struct{}{}
}
matchIDs = append(matchIDs, m.GetMatchId())
}
for _, mID := range matchIDs {
err := stream.Send(&pb.EvaluateResponse{MatchId: mID})
if err != nil {
return fmt.Errorf("Error sending evaluator output stream: %w", err)
}
}
return nil
}

View File

@ -1,18 +1,4 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package firstmatch
package scenarios
import (
"fmt"
@ -26,14 +12,20 @@ const (
poolName = "all"
)
func Scenario() *FirstMatchScenario {
return &FirstMatchScenario{}
}
var (
firstMatchScenario = &Scenario{
MMF: queryPoolsWrapper(firstMatchMmf),
Evaluator: fifoEvaluate,
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
Ticket: firstMatchTicket,
Profiles: firstMatchProfile,
}
)
type FirstMatchScenario struct {
}
func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
func firstMatchProfile() []*pb.MatchProfile {
return []*pb.MatchProfile{
{
Name: "entirePool",
@ -46,11 +38,11 @@ func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
}
}
func (_ *FirstMatchScenario) Ticket() *pb.Ticket {
func firstMatchTicket() *pb.Ticket {
return &pb.Ticket{}
}
func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func firstMatchMmf(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
tickets := poolTickets[poolName]
var matches []*pb.Match
@ -68,7 +60,7 @@ func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[s
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
// previously accepted match. Essentially first to claim the ticket wins.
func (_ *FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
func fifoEvaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and receiving at the

View File

@ -14,65 +14,10 @@
package scenarios
import (
"sync"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"open-match.dev/open-match/examples/scale/scenarios/battleroyal"
"open-match.dev/open-match/examples/scale/scenarios/firstmatch"
"open-match.dev/open-match/examples/scale/scenarios/teamshooter"
"open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
var (
queryServiceAddress = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
logger = logrus.WithFields(logrus.Fields{
"app": "scale",
})
)
// GameScenario defines what tickets look like, and how they should be matched.
type GameScenario interface {
// Ticket creates a new ticket, with randomized parameters.
Ticket() *pb.Ticket
// Profiles lists all of the profiles that should run.
Profiles() []*pb.MatchProfile
// MatchFunction is the custom logic implementation of the match function.
MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
// Evaluate is the custom logic implementation of the evaluator.
Evaluate(stream pb.Evaluator_EvaluateServer) error
}
import "open-match.dev/open-match/pkg/pb"
// ActiveScenario sets the scenario with preset parameters that we want to use for current Open Match benchmark run.
var ActiveScenario = func() *Scenario {
var gs GameScenario = firstmatch.Scenario()
// TODO: Select which scenario to use based on some configuration or choice,
// so it's easier to run different scenarios without changing code.
gs = battleroyal.Scenario()
gs = teamshooter.Scenario()
return &Scenario{
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
Ticket: gs.Ticket,
Profiles: gs.Profiles,
MMF: queryPoolsWrapper(gs.MatchFunction),
Evaluator: gs.Evaluate,
}
}()
var ActiveScenario = battleRoyalScenario
// Scenario defines the controllable fields for Open Match benchmark scenarios
type Scenario struct {
@ -113,44 +58,3 @@ func (mmf matchFunction) Run(req *pb.RunRequest, srv pb.MatchFunction_RunServer)
func (eval evaluatorFunction) Evaluate(srv pb.Evaluator_EvaluateServer) error {
return eval(srv)
}
func getQueryServiceGRPCClient() pb.QueryServiceClient {
conn, err := grpc.Dial(queryServiceAddress, testing.NewGRPCDialOptions(logger)...)
if err != nil {
logger.Fatalf("Failed to connect to Open Match, got %v", err)
}
return pb.NewQueryServiceClient(conn)
}
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
var q pb.QueryServiceClient
var startQ sync.Once
return func(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
startQ.Do(func() {
q = getQueryServiceGRPCClient()
})
poolTickets, err := matchfunction.QueryPools(stream.Context(), q, req.GetProfile().GetPools())
if err != nil {
return err
}
proposals, err := mmf(req.GetProfile(), poolTickets)
if err != nil {
return err
}
logger.WithFields(logrus.Fields{
"proposals": proposals,
}).Trace("proposals returned by match function")
for _, proposal := range proposals {
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
return err
}
}
return nil
}
}

View File

@ -1,330 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TeamShooterScenario is a scenario which is designed to emulate the
// approximate behavior to open match that a skill based team game would have.
// It doesn't try to provide good matchmaking for real players. There are three
// arguments used:
// mode: The game mode the players wants to play in. mode is a hard partition.
// regions: Players may have good latency to one or more regions. A player will
// search for matches in all eligible regions.
// skill: Players have a random skill based on a normal distribution. Players
// will only be matched with other players who have a close skill value. The
// match functions have overlapping partitions of the skill brackets.
package teamshooter
import (
"fmt"
"io"
"math"
"math/rand"
"sort"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"open-match.dev/open-match/pkg/pb"
)
const (
poolName = "all"
skillArg = "skill"
modeArg = "mode"
)
// TeamShooterScenario provides the required methods for running a scenario.
type TeamShooterScenario struct {
	// Names of available region tags.
	regions []string
	// Maximum regions a player can search in.
	maxRegions int
	// Number of tickets which form a match.
	playersPerGame int
	// For each pair of consecutive values, the value to split profiles on by
	// skill.
	skillBoundaries []float64
	// Maximum difference between two tickets to consider a match valid.
	maxSkillDifference float64
	// List of mode names.
	modes []string
	// Returns a random mode, with some weight.
	randomMode func() string
}
// Scenario creates a new TeamShooterScenario with default settings: two
// regions, weighted game modes, 12-player matches, and a single skill split
// at zero.
func Scenario() *TeamShooterScenario {
	modeNames, pickMode := weightedChoice(map[string]int{
		"pl": 100, // Payload, very popular.
		"cp": 25,  // Capture point, 1/4 as popular.
	})

	regionNames := make([]string, 0, 2)
	for r := 0; r < 2; r++ {
		regionNames = append(regionNames, fmt.Sprintf("region_%d", r))
	}

	return &TeamShooterScenario{
		regions:            regionNames,
		maxRegions:         1,
		playersPerGame:     12,
		skillBoundaries:    []float64{math.Inf(-1), 0, math.Inf(1)},
		maxSkillDifference: 0.01,
		modes:              modeNames,
		randomMode:         pickMode,
	}
}
// Profiles shards the player base on mode, region, and skill. One profile is
// produced per (region, mode, skill bracket) combination. Each bracket is
// widened by maxSkillDifference/2 on both ends, so adjacent brackets overlap
// slightly and tickets near a boundary can be matched from either profile.
func (t *TeamShooterScenario) Profiles() []*pb.MatchProfile {
	p := []*pb.MatchProfile{}
	for _, region := range t.regions {
		for _, mode := range t.modes {
			for i := 0; i+1 < len(t.skillBoundaries); i++ {
				skillMin := t.skillBoundaries[i] - t.maxSkillDifference/2
				skillMax := t.skillBoundaries[i+1] + t.maxSkillDifference/2
				p = append(p, &pb.MatchProfile{
					Name: fmt.Sprintf("%s_%s_%v-%v", region, mode, skillMin, skillMax),
					Pools: []*pb.Pool{
						{
							Name: poolName,
							// Skill bracket for this shard.
							DoubleRangeFilters: []*pb.DoubleRangeFilter{
								{
									DoubleArg: skillArg,
									Min:       skillMin,
									Max:       skillMax,
								},
							},
							// Ticket must list this region tag.
							TagPresentFilters: []*pb.TagPresentFilter{
								{
									Tag: region,
								},
							},
							// Ticket must request exactly this mode.
							StringEqualsFilters: []*pb.StringEqualsFilter{
								{
									StringArg: modeArg,
									Value:     mode,
								},
							},
						},
					},
				})
			}
		}
	}
	return p
}
// Ticket creates a randomized player: a skill value drawn from a clamped
// normal distribution, a weighted-random game mode, and one or more adjacent
// region tags starting from a random region.
func (t *TeamShooterScenario) Ticket() *pb.Ticket {
	start := rand.Intn(len(t.regions))
	count := rand.Intn(t.maxRegions) + 1

	tags := []string{}
	for offset := 0; offset < count; offset++ {
		// The Earth is actually a circle.
		tags = append(tags, t.regions[(start+offset)%len(t.regions)])
	}

	return &pb.Ticket{
		SearchFields: &pb.SearchFields{
			DoubleArgs: map[string]float64{
				skillArg: clamp(rand.NormFloat64(), -3, 3),
			},
			StringArgs: map[string]string{
				modeArg: t.randomMode(),
			},
			Tags: tags,
		},
	}
}
// MatchFunction puts tickets into matches based on their skill, finding the
// required number of tickets for a game within the maximum skill difference.
//
// Tickets from the "all" pool are sorted ascending by skill, and a sliding
// window of playersPerGame consecutive tickets is tried at every offset. A
// window becomes a proposal when the spread between its weakest and strongest
// ticket is under maxSkillDifference. Consecutive windows overlap, so one
// ticket may appear in several proposals; deduplication is deferred to the
// evaluator.
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
	// skill extracts a ticket's skill value from its search fields.
	skill := func(t *pb.Ticket) float64 {
		return t.SearchFields.DoubleArgs[skillArg]
	}
	tickets := poolTickets[poolName]
	var matches []*pb.Match
	// Sort by skill so any valid match is a contiguous window.
	sort.Slice(tickets, func(i, j int) bool {
		return skill(tickets[i]) < skill(tickets[j])
	})
	for i := 0; i+t.playersPerGame <= len(tickets); i++ {
		mt := tickets[i : i+t.playersPerGame]
		// The window is sorted, so its first/last elements are the extremes.
		if skill(mt[len(mt)-1])-skill(mt[0]) < t.maxSkillDifference {
			// Quality is the negated sum of squared deviations from the
			// window's mean skill: tighter skill groupings score closer to
			// zero (i.e. higher), so the evaluator prefers them.
			avg := float64(0)
			for _, t := range mt {
				avg += skill(t)
			}
			avg /= float64(len(mt))
			q := float64(0)
			for _, t := range mt {
				diff := skill(t) - avg
				q -= diff * diff
			}
			m, err := (&matchExt{
				id:            fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
				matchProfile:  p.GetName(),
				matchFunction: "skillmatcher",
				tickets:       mt,
				quality:       q,
			}).pack()
			if err != nil {
				return nil, err
			}
			matches = append(matches, m)
		}
	}
	return matches, nil
}
// Evaluate returns matches in order of highest quality, skipping any matches
// which contain tickets that are already used. It reads all proposals from
// the stream, sorts them by quality descending, then greedily accepts each
// proposal whose tickets are all unused.
func (t *TeamShooterScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
	// Unpacked proposal matches.
	proposals := []*matchExt{}
	// Ticket ids which are used in a match.
	used := map[string]struct{}{}

	for {
		req, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			// Error strings are lowercase per Go convention (staticcheck ST1005).
			return fmt.Errorf("error reading evaluator input stream: %w", err)
		}

		p, err := unpackMatch(req.GetMatch())
		if err != nil {
			return err
		}

		proposals = append(proposals, p)
	}

	// Higher quality is better.
	sort.Slice(proposals, func(i, j int) bool {
		return proposals[i].quality > proposals[j].quality
	})

outer:
	for _, p := range proposals {
		// Reject the whole proposal if any of its tickets is taken.
		for _, t := range p.tickets {
			if _, ok := used[t.Id]; ok {
				continue outer
			}
		}
		// Claim every ticket in the accepted proposal.
		for _, t := range p.tickets {
			used[t.Id] = struct{}{}
		}

		if err := stream.Send(&pb.EvaluateResponse{MatchId: p.id}); err != nil {
			return fmt.Errorf("error sending evaluator output stream: %w", err)
		}
	}
	return nil
}
// matchExt presents the match and extension data in a native form, and allows
// easy conversion to and from proto format.
type matchExt struct {
	// id is the unique match id.
	id string
	// tickets are the tickets composing the match.
	tickets []*pb.Ticket
	// quality ranks the match for the evaluator; higher is better. It is
	// carried across the wire in the proto "quality" extension.
	quality float64
	// matchProfile is the name of the profile that produced this match.
	matchProfile string
	// matchFunction is the name of the match function that produced this match.
	matchFunction string
}
// unpackMatch converts a proto Match into a matchExt, extracting the quality
// value stored in the match's "quality" extension. It returns an error if the
// extension is missing or cannot be unmarshaled.
func unpackMatch(m *pb.Match) (*matchExt, error) {
	v := &wrappers.DoubleValue{}
	// Error strings are lowercase per Go convention (staticcheck ST1005).
	if err := ptypes.UnmarshalAny(m.Extensions["quality"], v); err != nil {
		return nil, fmt.Errorf("error unpacking match quality: %w", err)
	}

	return &matchExt{
		id:            m.MatchId,
		tickets:       m.Tickets,
		quality:       v.Value,
		matchProfile:  m.MatchProfile,
		matchFunction: m.MatchFunction,
	}, nil
}
// pack converts a matchExt into a proto Match, storing the quality value in
// the "quality" extension. It returns an error if the quality value cannot be
// marshaled into an Any.
func (m *matchExt) pack() (*pb.Match, error) {
	v := &wrappers.DoubleValue{Value: m.quality}
	a, err := ptypes.MarshalAny(v)
	if err != nil {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return nil, fmt.Errorf("error packing match quality: %w", err)
	}

	return &pb.Match{
		MatchId:       m.id,
		Tickets:       m.tickets,
		MatchProfile:  m.matchProfile,
		MatchFunction: m.matchFunction,
		Extensions: map[string]*any.Any{
			"quality": a,
		},
	}, nil
}
// clamp limits v to the inclusive range [min, max], returning the nearest
// bound when v falls outside it.
func clamp(v float64, min float64, max float64) float64 {
	switch {
	case v < min:
		return min
	case v > max:
		return max
	default:
		return v
	}
}
// weightedChoice takes a map of values, and their relative probability. It
// returns a list of the values, along with a function which will return random
// choices from the values with the weighted probability.
func weightedChoice(m map[string]int) ([]string, func() string) {
	choices := make([]string, 0, len(m))
	total := 0
	for choice, weight := range m {
		choices = append(choices, choice)
		total += weight
	}

	pick := func() string {
		// A uniform draw in [0, total) lands on each key with probability
		// weight/total, regardless of map iteration order.
		remaining := rand.Intn(total)
		for choice, weight := range m {
			remaining -= weight
			if remaining < 0 {
				return choice
			}
		}
		panic("weightedChoice is broken.")
	}

	return choices, pick
}

View File

@ -0,0 +1,137 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scenarios
import (
"fmt"
"io"
"sync"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
var (
queryServiceAddress = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
logger = logrus.WithFields(logrus.Fields{
"app": "scale",
})
)
// StatProcessor uses syncMaps to store the stress test metrics and occurrence of errors.
// It can write out the data to an input io.Writer.
type StatProcessor struct {
	// em maps an error-description string to the number of times it occurred.
	em *sync.Map
	// sm maps a metric name to its current value.
	sm *sync.Map
}
// NewStatProcessor returns an initialized StatProcessor ready to record
// metrics and errors.
func NewStatProcessor() *StatProcessor {
	sp := &StatProcessor{
		em: new(sync.Map),
		sm: new(sync.Map),
	}
	return sp
}
// SetStat sets the value for a key, overwriting any previous value.
func (e StatProcessor) SetStat(k string, v interface{}) {
	e.sm.Store(k, v)
}
// IncrementStat increments the value of a key by delta. Supported delta
// types are int, float32, and float64; any other type is logged and ignored.
//
// A missing key is treated as zero of delta's type. (The original code
// initialized a missing value to an untyped int 0 for every case, which made
// the first float32/float64 increment of a key panic on the type assertion.)
//
// NOTE(review): the Load/Store pair is a read-modify-write, so concurrent
// increments of the same key may lose updates — confirm callers accept
// approximate counts.
func (e StatProcessor) IncrementStat(k string, delta interface{}) {
	switch d := delta.(type) {
	case int:
		prev, ok := e.sm.Load(k)
		if !ok {
			prev = 0
		}
		e.sm.Store(k, prev.(int)+d)
	case float32:
		prev, ok := e.sm.Load(k)
		if !ok {
			prev = float32(0)
		}
		e.sm.Store(k, prev.(float32)+d)
	case float64:
		prev, ok := e.sm.Load(k)
		if !ok {
			prev = float64(0)
		}
		e.sm.Store(k, prev.(float64)+d)
	default:
		logger.Errorf("IncrementStat: type %T not supported", delta)
	}
}
// RecordError records the occurrence of input errors, counting how many times
// each distinct "desc: message" string has been seen.
//
// NOTE(review): the Load/Store pair is a read-modify-write, not a single
// atomic step, so concurrent recordings of the same error may lose
// increments — confirm callers accept approximate counts.
func (e StatProcessor) RecordError(desc string, err error) {
	errMsg := fmt.Sprintf("%s: %s", desc, err.Error())
	errRead, ok := e.em.Load(errMsg)
	if !ok {
		// First occurrence of this error message.
		errRead = 0
	}
	e.em.Store(errMsg, errRead.(int)+1)
}
// Log writes the formatted metrics and error counts to the input writer.
// Output is best-effort: write failures are not reported to the caller.
func (e StatProcessor) Log(w io.Writer) {
	// dump writes every key/value pair in m to w, one per line.
	dump := func(m *sync.Map) {
		m.Range(func(k interface{}, v interface{}) bool {
			// fmt.Fprintf avoids the []byte(fmt.Sprintf(...)) allocation and
			// the ignored w.Write error return of the original.
			fmt.Fprintf(w, "%s: %d \n", k, v)
			return true
		})
	}
	dump(e.sm)
	dump(e.em)
}
// getQueryServiceGRPCClient dials the QueryService endpoint at
// queryServiceAddress and returns a client for it. If the dial fails, the
// process exits via logger.Fatalf.
func getQueryServiceGRPCClient() pb.QueryServiceClient {
	conn, err := grpc.Dial(queryServiceAddress, testing.NewGRPCDialOptions(logger)...)
	if err != nil {
		logger.Fatalf("Failed to connect to Open Match, got %v", err)
	}
	return pb.NewQueryServiceClient(conn)
}
// queryPoolsWrapper adapts a (profile, pools) -> matches function into a
// matchFunction that serves the MatchFunction.Run gRPC stream. It lazily
// dials the QueryService on the first call (guarded by sync.Once), queries
// the tickets for every pool in the request's profile, invokes mmf, and
// streams each returned proposal back to the caller.
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
	var q pb.QueryServiceClient
	var startQ sync.Once
	return func(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
		// Dial only once, on the first Run invocation; subsequent calls
		// reuse the same client.
		startQ.Do(func() {
			q = getQueryServiceGRPCClient()
		})
		poolTickets, err := matchfunction.QueryPools(stream.Context(), q, req.GetProfile().GetPools())
		if err != nil {
			return err
		}
		proposals, err := mmf(req.GetProfile(), poolTickets)
		if err != nil {
			return err
		}
		logger.WithFields(logrus.Fields{
			"proposals": proposals,
		}).Trace("proposals returned by match function")
		// Stream every proposal back; any send error aborts the run.
		for _, proposal := range proposals {
			if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
				return err
			}
		}
		return nil
	}
}

10
go.mod
View File

@ -15,7 +15,7 @@ module open-match.dev/open-match
// limitations under the License.
// When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
go 1.14
go 1.13
require (
cloud.google.com/go v0.47.0 // indirect
@ -23,9 +23,8 @@ require (
contrib.go.opencensus.io/exporter/ocagent v0.6.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0
contrib.go.opencensus.io/exporter/stackdriver v0.12.8
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71
github.com/TV4/logrus-stackdriver-formatter v0.1.0
github.com/alicebob/miniredis/v2 v2.11.0
github.com/alicebob/miniredis/v2 v2.10.1
github.com/apache/thrift v0.13.0 // indirect
github.com/aws/aws-sdk-go v1.25.27 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible
@ -33,7 +32,7 @@ require (
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
github.com/golang/protobuf v1.3.2
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3
github.com/googleapis/gnostic v0.3.1 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
@ -45,7 +44,6 @@ require (
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.2.1
github.com/pseudomuto/protoc-gen-doc v1.3.2 // indirect
github.com/rs/xid v1.2.1
github.com/sirupsen/logrus v1.4.2
github.com/spf13/afero v1.2.1 // indirect
@ -53,10 +51,10 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.5.0
github.com/stretchr/testify v1.4.0
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 // indirect
go.opencensus.io v0.22.1
golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 // indirect
golang.org/x/net v0.0.0-20191105084925-a882066a44e0
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
google.golang.org/api v0.13.0 // indirect

45
go.sum
View File

@ -20,17 +20,9 @@ contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZ
contrib.go.opencensus.io/exporter/stackdriver v0.12.8 h1:iXI5hr7pUwMx0IwMphpKz5Q3If/G5JiWFVZ5MPPxP9E=
contrib.go.opencensus.io/exporter/stackdriver v0.12.8/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71 h1:J52um+Sp3v8TpSY0wOgpjr84np+xvrY3503DRirJ6wI=
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71/go.mod h1:E4OavwrrOME3uj3Zm9Rla8ZDqlAR5GqKA+mMIPoilYk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/FZambia/sentinel v1.0.0 h1:KJ0ryjKTZk5WMp0dXvSdNqp3lFaW1fNFuEYfrkLOYIc=
github.com/FZambia/sentinel v1.0.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.15.0+incompatible h1:0gSxPGWS9PAr7U2NsQ2YQg6juRDINkUyuvbb4b2Xm8w=
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
@ -42,11 +34,9 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.11.0 h1:Dz6uJ4w3Llb1ZiFoqyzF9aLuzbsEWCeKwstu9MzmSAk=
github.com/alicebob/miniredis/v2 v2.11.0/go.mod h1:UA48pmi7aSazcGAvcdKcBB49z521IC9VjTTRz2nIaJE=
github.com/alicebob/miniredis/v2 v2.10.1 h1:r+hpRUqYCcIsrjxH/wRLwQGmA2nkQf4IYj7MKPwbA+s=
github.com/alicebob/miniredis/v2 v2.10.1/go.mod h1:gUxwu+6dLLmJHIXOOBlgcXqbcpPPp+NzOnBzgqFIGYA=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@ -75,7 +65,6 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -86,8 +75,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@ -115,16 +102,13 @@ github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4er
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3 h1:6amM4HsNPOvMLVc2ZnyqrjeQ92YAVWn7T4WBKK87inY=
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible h1:1mCVU17Wc8oyVUlx1ZXpnWz1DNP6v0R5z5ElKCTvVrY=
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -138,9 +122,6 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@ -164,9 +145,6 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0 h1:pO2K/gKgKaat5LdpAhxhluX2GPQMaI3W5FUz/I/UnWk=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
@ -192,8 +170,6 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
@ -205,8 +181,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 h1:28i1IjGcx8AofiB4N3q5Yls55VEaitzuEPkFJEVgGkA=
github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -223,7 +197,6 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@ -253,10 +226,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/pseudomuto/protoc-gen-doc v1.3.2 h1:61vWZuxYa8D7Rn4h+2dgoTNqnluBmJya2MgbqO32z6g=
github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
github.com/pseudomuto/protokit v0.2.0 h1:hlnBDcy3YEDXH7kc9gV+NLaN0cDzhDvD1s7Y6FZ8RpM=
github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@ -283,7 +252,6 @@ github.com/spf13/viper v1.5.0 h1:GpsTwfsQ27oS/Aha/6d1oD7tpKIqWnOA6tgOX9HHkt4=
github.com/spf13/viper v1.5.0/go.mod h1:AkYRkVJF8TkSG/xet6PzXX+l39KhhXa2pdqVSxnTcn4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
@ -295,8 +263,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/gopher-lua v0.0.0-20190206043414-8bfc7677f583/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c h1:RCby8AaF+weuP1M+nwMQ4uQYO2shgD6UFAKvnXszwTw=
github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@ -306,7 +274,6 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -361,7 +328,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -430,7 +396,6 @@ google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=

View File

@ -20,6 +20,28 @@ metadata:
app: open-match-demo
release: open-match-demo
---
apiVersion: v1
kind: ConfigMap
metadata:
name: customize-configmap
namespace: open-match-demo
labels:
app: open-match-customize
component: config
release: open-match-demo
data:
matchmaker_config_default.yaml: |-
api:
functions:
hostname: "om-function"
grpcport: 50502
httpport: 51502
matchmaker_config_override.yaml: |-
api:
query:
hostname: "om-query.open-match.svc.cluster.local"
grpcport: "50503"
---
kind: Service
apiVersion: v1
metadata:
@ -86,9 +108,21 @@ spec:
component: matchfunction
release: open-match-demo
spec:
volumes:
- name: customize-config-volume
configMap:
name: customize-configmap
- name: om-config-volume-default
configMap:
name: customize-configmap
containers:
- name: om-function
image: "gcr.io/open-match-public-images/openmatch-mmf-go-soloduel:0.0.0-dev"
volumeMounts:
- name: customize-config-volume
mountPath: /app/config/override
- name: om-config-volume-default
mountPath: /app/config/default
image: "gcr.io/open-match-public-images/openmatch-mmf-go-soloduel:0.9.0"
ports:
- name: grpc
containerPort: 50502
@ -125,7 +159,7 @@ spec:
spec:
containers:
- name: om-demo
image: "gcr.io/open-match-public-images/openmatch-demo-first-match:0.0.0-dev"
image: "gcr.io/open-match-public-images/openmatch-demo-first-match:0.9.0"
imagePullPolicy: Always
ports:
- name: http

View File

@ -13,13 +13,13 @@
# limitations under the License.
apiVersion: v2
appVersion: "1.1.0"
version: 1.1.0
appVersion: "0.9.0"
version: 0.9.0
name: open-match
dependencies:
- name: redis
version: 9.5.0
repository: https://charts.helm.sh/stable
repository: https://kubernetes-charts.storage.googleapis.com/
condition: open-match-core.redis.enabled
- name: open-match-telemetry
version: 0.0.0-dev

View File

@ -1,20 +0,0 @@
{*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*}
{{/* vim: set filetype=mustache: */}}
{{- define "openmatchcustomize.function.hostName" -}}
{{- .Values.function.hostName | default (printf "%s-function" (include "openmatch.fullname" . ) ) -}}
{{- end -}}

View File

@ -0,0 +1,41 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: customize-configmap
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
component: config
release: {{ .Release.Name }}
data:
matchmaker_config_default.yaml: |-
api:
functions:
hostname: "{{ .Values.function.hostName }}"
grpcport: "{{ .Values.function.grpcPort }}"
httpport: "{{ .Values.function.httpPort }}"
evaluator:
hostname: "{{ .Values.evaluator.hostName }}"
grpcport: "{{ .Values.evaluator.grpcPort }}"
httpport: "{{ .Values.evaluator.httpPort }}"
matchmaker_config_override.yaml: |-
api:
query:
hostname: "{{ .Values.query.hostName }}.{{ .Release.Namespace }}.svc.cluster.local"
grpcport: "{{ .Values.query.grpcPort }}"

View File

@ -18,7 +18,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -46,20 +46,20 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "openmatch.name" . }}
@ -83,11 +83,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.evaluatorConfigs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.evaluatorConfigs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.evaluator.hostName" . }}
- name: {{ .Values.evaluator.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.evaluatorConfigs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -18,7 +18,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -46,20 +46,20 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -84,11 +84,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.mmfConfigs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.mmfConfigs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatchcustomize.function.hostName" . }}
- name: {{ .Values.function.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.mmfConfigs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -26,7 +26,7 @@ evaluator:
enabled: false
replicas: 3
portType: ClusterIP
image: openmatch-default-evaluator
image: openmatch-evaluator-go-simple
evaluatorConfigs:
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,
@ -35,13 +35,11 @@ evaluatorConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: customize-configmap
customize:
volumeName: om-config-volume-override
volumeName: customize-config-volume
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: customize-configmap
mmfConfigs:
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,
@ -50,10 +48,8 @@ mmfConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: customize-configmap
customize:
volumeName: om-config-volume-override
volumeName: customize-config-volume
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: customize-configmap

View File

@ -320,100 +320,6 @@
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 19
},
"id": 24,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(scale_frontend_runners_waiting)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Runners Waiting To Start",
"refId": "A"
},
{
"expr": "sum(scale_frontend_runners_creating)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"legendFormat": "Runners Creating Ticket",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Outstanding Frontend Runners",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,

View File

@ -1,42 +0,0 @@
{*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*}
{{/* vim: set filetype=mustache: */}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "openmatchscale.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{- define "openmatchscale.scaleBackend.hostName" -}}
{{- .Values.scaleBackend.hostName | default (printf "%s-backend" (include "openmatchscale.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatchscale.scaleFrontend.hostName" -}}
{{- .Values.scaleFrontend.hostName | default (printf "%s-frontend" (include "openmatchscale.fullname" . ) ) -}}
{{- end -}}

View File

@ -15,7 +15,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatchscale.scaleBackend.hostName" . }}
name: {{ .Values.scaleBackend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -34,7 +34,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatchscale.scaleBackend.hostName" . }}
name: {{ .Values.scaleBackend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -59,11 +59,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatchscale.scaleBackend.hostName" . }}
- name: {{ .Values.scaleBackend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -0,0 +1,108 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: scale-configmap
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
component: config
release: {{ .Release.Name }}
data:
matchmaker_config_default.yaml: |-
api:
backend:
hostname: "{{ .Values.backend.hostName }}"
grpcport: "{{ .Values.backend.grpcPort }}"
httpport: "{{ .Values.backend.httpPort }}"
frontend:
hostname: "{{ .Values.frontend.hostName }}"
grpcport: "{{ .Values.frontend.grpcPort }}"
httpport: "{{ .Values.frontend.httpPort }}"
scale:
httpport: "51509"
{{- if .Values.global.tls.enabled }}
tls:
trustedCertificatePath: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
certificatefile: "{{.Values.global.tls.server.mountPath}}/public.cert"
privatekey: "{{.Values.global.tls.server.mountPath}}/private.key"
rootcertificatefile: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
{{- end }}
logging:
level: debug
{{- if .Values.global.telemetry.stackdriverMetrics.enabled }}
format: stackdriver
{{- else }}
format: text
{{- end }}
rpc: {{ .Values.global.logging.rpc.enabled }}
# Open Match applies the exponential backoff strategy for its retryable gRPC calls.
# The settings below are the default backoff configuration used in Open Match.
# See https://github.com/cenkalti/backoff/blob/v3/exponential.go for detailed explanations
backoff:
# The initial retry interval (in milliseconds)
initialInterval: 100ms
# maxInterval caps the maximum time elapsed for a retry interval
maxInterval: 500ms
# The next retry interval is multiplied by this multiplier
multiplier: 1.5
# Randomize the retry interval
randFactor: 0.5
# maxElapsedTime caps the retry time (in milliseconds)
maxElapsedTime: 3000ms
telemetry:
zpages:
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
jaeger:
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
samplerFraction: {{ .Values.global.telemetry.jaeger.samplerFraction }}
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
prometheus:
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"
serviceDiscovery: "{{ .Values.global.telemetry.prometheus.serviceDiscovery }}"
stackdriverMetrics:
enable: "{{ .Values.global.telemetry.stackdriverMetrics.enabled }}"
gcpProjectId: "{{ .Values.global.gcpProjectId }}"
prefix: "{{ .Values.global.telemetry.stackdriverMetrics.prefix }}"
reportingPeriod: "{{ .Values.global.telemetry.reportingPeriod }}"
matchmaker_config_override.yaml: |-
testConfig:
profile: "{{ .Values.testConfig.profile }}"
regions:
{{- range .Values.testConfig.regions }}
- {{ . }}
{{- end }}
characters:
{{- range .Values.testConfig.characters }}
- {{ . }}
{{- end }}
minRating: "{{ .Values.testConfig.minRating }}"
maxRating: "{{ .Values.testConfig.maxRating }}"
ticketsPerMatch: "{{ .Values.testConfig.ticketsPerMatch }}"
multifilter:
rangeSize: "{{ .Values.testConfig.multifilter.rangeSize }}"
rangeOverlap: "{{ .Values.testConfig.multifilter.rangeOverlap }}"
multipool:
rangeSize: "{{ .Values.testConfig.multipool.rangeSize }}"
rangeOverlap: "{{ .Values.testConfig.multipool.rangeOverlap }}"
characterCount: "{{ .Values.testConfig.multipool.characterCount }}"

View File

@ -15,7 +15,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
name: {{ .Values.scaleFrontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -34,7 +34,7 @@ spec:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
name: {{ .Values.scaleFrontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -59,11 +59,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
- name: {{ .Values.scaleFrontend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatchscale.fullname" . }}-dashboard
name: open-match-scale-dashboard
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"

View File

@ -13,13 +13,13 @@
# limitations under the License.
scaleFrontend:
hostName:
hostName: om-scale-frontend
httpPort: 51509
replicas: 1
image: openmatch-scale-frontend
scaleBackend:
hostName:
hostName: om-scale-backend
httpPort: 51509
replicas: 1
image: openmatch-scale-backend
@ -28,10 +28,29 @@ configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
override:
volumeName: om-config-volume-override
configName: scale-configmap
scale-configmap:
volumeName: scale-config-volume
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: scale-configmap
testConfig:
profile: scaleprofiles
regions:
- region.europe-west1
- region.europe-west2
- region.europe-west3
- region.europe-west4
characters:
- cleric
- knight
minRating: 0
maxRating: 100
ticketsPerMatch: 8
multifilter:
rangeSize: 10
rangeOverlap: 5
multipool:
rangeSize: 10
rangeOverlap: 5
characterCount: 4

View File

@ -20,14 +20,14 @@ version: 0.0.0-dev
dependencies:
- name: prometheus
version: 9.2.0
repository: https://charts.helm.sh/stable
repository: https://kubernetes-charts.storage.googleapis.com/
condition: global.telemetry.prometheus.enabled,prometheus.enabled
- name: grafana
version: 4.0.1
repository: https://charts.helm.sh/stable
repository: https://kubernetes-charts.storage.googleapis.com/
condition: global.telemetry.grafana.enabled,grafana.enabled
- name: jaeger
version: 0.13.3
repository: https://charts.helm.sh/stable
repository: https://kubernetes-charts-incubator.storage.googleapis.com/
condition: global.telemetry.jaeger.enabled,jaeger.enabled

View File

@ -62,7 +62,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (pod_name) (\n\nsum(\n rate(container_cpu_usage_seconds_total{container_name!=\"POD\"}[5m]) * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\"))\n) by (pod_name, container_name)\n\n/\n\nsum(\n (container_spec_cpu_quota{container_name!=\"POD\"} * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")))\n /\n (container_spec_cpu_period{container_name!=\"POD\"} * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")))\n) by (pod_name, container_name)\n\n*\n\n100\n)\n",
"expr": "avg by (pod_name) (\n sum(\n rate(container_cpu_usage_seconds_total{pod_name=~\"om-.*\", container_name!=\"POD\"}[5m])\n ) by (pod_name, container_name) \n \n /\n \n sum(\n container_spec_cpu_quota{pod_name=~\"om-.*\", container_name!=\"POD\"} / container_spec_cpu_period{pod_name=~\"om-.*\", container_name!=\"POD\"}\n ) by (pod_name, container_name) \n \n * \n \n 100\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{pod_name}}",
@ -155,7 +155,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (go_goroutines{app=~\"open-match\"})",
"expr": "avg by (component) (go_goroutines{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -256,7 +256,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component,app) (process_resident_memory_bytes{app=~\"open-match\"})",
"expr": "avg by (component,app) (process_resident_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - resident",
@ -265,7 +265,7 @@
"step": 4
},
{
"expr": "avg by (component,app) (process_virtual_memory_bytes{app=~\"open-match\"})",
"expr": "avg by (component,app) (process_virtual_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - virtual",
@ -365,7 +365,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(process_resident_memory_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(process_resident_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - resident",
@ -374,7 +374,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(process_virtual_memory_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(process_virtual_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - virtual",
@ -475,7 +475,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (go_memstats_alloc_bytes{app=~\"open-match\"})",
"expr": "avg by (component) (go_memstats_alloc_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - bytes allocated",
@ -484,7 +484,7 @@
"step": 4
},
{
"expr": "avg by (component) (rate(go_memstats_alloc_bytes_total{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (rate(go_memstats_alloc_bytes_total{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - alloc rate",
@ -493,7 +493,7 @@
"step": 4
},
{
"expr": "avg by (component) (go_memstats_stack_inuse_bytes{app=~\"open-match\"})",
"expr": "avg by (component) (go_memstats_stack_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - stack inuse",
@ -502,7 +502,7 @@
"step": 4
},
{
"expr": "avg by (component) (go_memstats_heap_inuse_bytes{app=~\"open-match\"})",
"expr": "avg by (component) (go_memstats_heap_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -604,7 +604,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(go_memstats_alloc_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_alloc_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - bytes allocated",
@ -613,7 +613,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(go_memstats_stack_inuse_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_stack_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - stack inuse",
@ -622,7 +622,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(go_memstats_heap_inuse_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_heap_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -719,7 +719,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (process_open_fds{app=~\"open-match\"})",
"expr": "avg by (component) (process_open_fds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -815,7 +815,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(process_open_fds{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(process_open_fds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -911,7 +911,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component, quantile) (go_gc_duration_seconds{app=~\"open-match\"})",
"expr": "avg by (component, quantile) (go_gc_duration_seconds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}: {{quantile}}",

View File

@ -15,8 +15,8 @@
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 2,
"iteration": 1580944984710,
"id": 3,
"iteration": 1580245993833,
"links": [],
"panels": [
{
@ -415,7 +415,7 @@
},
"id": 57,
"panels": [],
"title": "openmatch.QueryService/QueryTickets",
"title": "openmatch.Mmlogic/QueryTickets",
"type": "row"
},
{
@ -812,7 +812,7 @@
},
"id": 29,
"panels": [],
"title": "openmatch.BackendService/AssignTickets",
"title": "openmatch.Backend/AssignTickets",
"type": "row"
},
{
@ -1210,7 +1210,7 @@
"id": 31,
"panels": [],
"repeat": null,
"title": "openmatch.FrontendService/CreateTicket",
"title": "openmatch.Frontend/CreateTicket",
"type": "row"
},
{
@ -2399,7 +2399,7 @@
},
"id": 42,
"panels": [],
"title": "openmatch.BackendService/FetchMatches",
"title": "openmatch.Frontend/FetchMatches",
"type": "row"
},
{
@ -3191,7 +3191,7 @@
},
"id": 23,
"panels": [],
"title": "openmatch.FrontendService/DeleteTicket",
"title": "openmatch.Frontend/DeleteTicket",
"type": "row"
},
{

View File

@ -16,8 +16,8 @@
"editable": true,
"gnetId": 763,
"graphTooltip": 0,
"id": 6,
"iteration": 1580946687856,
"id": 2,
"iteration": 1579655194536,
"links": [],
"panels": [
{
@ -296,7 +296,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 0,
"fill": 1,
"gridPos": {
"h": 7,
"w": 8,
@ -312,8 +312,6 @@
"min": false,
"rightSide": true,
"show": true,
"sort": "current",
"sortDesc": true,
"total": false,
"values": true
},
@ -327,54 +325,24 @@
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
"alias": "limit",
"color": "#C4162A",
"hideTooltip": true,
"legend": false,
"nullPointMode": "connected"
},
{
"alias": "request",
"color": "#73BF69",
"hideTooltip": true,
"legend": false,
"nullPointMode": "connected"
}
],
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m]) * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"redis\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\"))) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{pod_name=~\"om-redis.*\", name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m])) by (pod_name, container_name) /\nsum(container_spec_cpu_quota{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}/container_spec_cpu_period{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}) by (pod_name, container_name) * 100",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{pod_name}} usage",
"legendFormat": "{{pod_name}}",
"refId": "A"
},
{
"expr": "sum(kube_pod_container_resource_limits_cpu_cores * on (pod) group_left(label_app) max by (pod, label_app) (kube_pod_labels{label_app=\"redis\"})) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "limit",
"refId": "B"
},
{
"expr": "sum(kube_pod_container_resource_requests_cpu_cores * on (pod) group_left(label_app) max by (pod, label_app) (kube_pod_labels{label_app=\"redis\"})) by (pod)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "request",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "CPU Usage",
"title": "CPU Usage Percentage of Limit",
"tooltip": {
"shared": true,
"sort": 0,
@ -392,7 +360,7 @@
"yaxes": [
{
"format": "short",
"label": "core",
"label": "%",
"logBase": 1,
"max": null,
"min": null,
@ -660,8 +628,6 @@
"min": false,
"rightSide": true,
"show": true,
"sort": "current",
"sortDesc": true,
"total": false,
"values": true
},
@ -689,13 +655,6 @@
"refId": "A",
"step": 240,
"target": ""
},
{
"expr": "sum by (kubernetes_pod_name) (rate(redis_commands_total[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "total - {{kubernetes_pod_name}}",
"refId": "B"
}
],
"thresholds": [],
@ -952,8 +911,8 @@
{
"allValue": null,
"current": {
"text": "10.28.0.12:9121",
"value": "10.28.0.12:9121"
"text": "10.28.0.27:9121",
"value": "10.28.0.27:9121"
},
"datasource": "Prometheus",
"definition": "label_values(redis_up, instance)",

View File

@ -0,0 +1,290 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 3,
"iteration": 1562886170229,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(frontend_tickets_created[$timewindow]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Created",
"refId": "A"
},
{
"expr": "sum(rate(frontend_tickets_deleted[$timewindow]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Deleted",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Ticket Flow",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"description": "",
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 4,
"interval": "",
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(frontend_tickets_assignments_retrieved[$timewindow]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Assignments Retrieved",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Assignments",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": null,
"format": "reqps",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"schemaVersion": 18,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": null,
"current": {
"text": "5m",
"value": "5m"
},
"hide": 0,
"includeAll": false,
"label": "Time Window",
"multi": false,
"name": "timewindow",
"options": [
{
"selected": true,
"text": "5m",
"value": "5m"
},
{
"selected": false,
"text": "10m",
"value": "10m"
},
{
"selected": false,
"text": "15m",
"value": "15m"
},
{
"selected": false,
"text": "30m",
"value": "30m"
},
{
"selected": false,
"text": "1h",
"value": "1h"
},
{
"selected": false,
"text": "4h",
"value": "4h"
}
],
"query": "5m,10m,15m,30m,1h,4h",
"skipUrlSync": false,
"type": "custom"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Tickets",
"uid": "TlgyFfIWz",
"version": 6
}

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.fullname" . }}-dashboards
name: open-match-dashboards
labels:
grafana_dashboard: "1"
data:

View File

@ -142,10 +142,17 @@ grafana:
notifiers: {}
sidecar:
dashboards:
enabled: true
datasources:
enabled: true
enabled: true
plugins: grafana-piechart-panel
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://open-match-prometheus-server.{{ .Release.Namespace }}.svc.cluster.local:80/
access: proxy
isDefault: true
jaeger:
enabled: true

View File

@ -22,26 +22,6 @@ Expand the name of the chart.
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Instead of .Chart.Name, we hard-code "open-match" as we need to call this from subcharts, but get the
same result as if called from this chart.
*/}}
{{- define "openmatch.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default "open-match" .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Render chart metadata labels: "chart", "heritage" unless "openmatch.noChartMeta" is set.
*/}}
@ -77,7 +57,7 @@ resources:
{{- range $configIndex, $configValues := .configs }}
- name: {{ $configValues.volumeName }}
configMap:
name: {{ tpl $configValues.configName $ }}
name: {{ $configValues.configName }}
{{- end }}
{{- end -}}
@ -94,10 +74,10 @@ resources:
{{- if .Values.global.tls.enabled }}
- name: tls-server-volume
secret:
secretName: {{ include "openmatch.fullname" . }}-tls-server
secretName: om-tls-server
- name: root-ca-volume
secret:
secretName: {{ include "openmatch.fullname" . }}-tls-rootca
secretName: om-tls-rootca
{{- end -}}
{{- end -}}
@ -112,7 +92,7 @@ resources:
{{- if .Values.redis.usePassword }}
- name: redis-password
secret:
secretName: {{ include "call-nested" (list . "redis" "redis.fullname") }}
secretName: {{ .Values.redis.fullnameOverride }}
{{- end -}}
{{- end -}}
@ -155,72 +135,3 @@ minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.serviceAccount.name" -}}
{{- .Values.global.kubernetes.serviceAccount | default (printf "%s-unprivileged-service" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.swaggerui.hostName" -}}
{{- .Values.swaggerui.hostName | default (printf "%s-swaggerui" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.query.hostName" -}}
{{- .Values.query.hostName | default (printf "%s-query" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.frontend.hostName" -}}
{{- .Values.frontend.hostName | default (printf "%s-frontend" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.backend.hostName" -}}
{{- .Values.backend.hostName | default (printf "%s-backend" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.synchronizer.hostName" -}}
{{- .Values.synchronizer.hostName | default (printf "%s-synchronizer" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.evaluator.hostName" -}}
{{- .Values.evaluator.hostName | default (printf "%s-evaluator" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.configmap.default" -}}
{{- printf "%s-configmap-default" (include "openmatch.fullname" . ) -}}
{{- end -}}
{{- define "openmatch.configmap.override" -}}
{{- printf "%s-configmap-override" (include "openmatch.fullname" . ) -}}
{{- end -}}
{{- define "openmatch.jaeger.agent" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.agent.name") }}:6831
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "openmatch.jaeger.collector" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
http://{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.collector.name") }}:14268/api/traces
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Call templates from sub-charts in a synthesized context, workaround for https://github.com/helm/helm/issues/3920
Mainly useful for things like `{{ include "call-nested" (list . "redis" "redis.fullname") }}`
https://github.com/helm/helm/issues/4535#issuecomment-416022809
https://github.com/helm/helm/issues/4535#issuecomment-477778391
*/}}
{{- define "call-nested" }}
{{- $dot := index . 0 }}
{{- $subchart := index . 1 | splitList "." }}
{{- $template := index . 2 }}
{{- $values := $dot.Values }}
{{- range $subchart }}
{{- $values = index $values . }}
{{- end }}
{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }}
{{- end }}

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.backend.hostName" . }}
- name: {{ .Values.backend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.frontend.hostName" . }}
- name: {{ .Values.frontend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.configmap.default" . }}
name: om-configmap-default
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -50,33 +50,28 @@ data:
api:
backend:
hostname: "{{ include "openmatch.backend.hostName" . }}"
hostname: "{{ .Values.backend.hostName }}"
grpcport: "{{ .Values.backend.grpcPort }}"
httpport: "{{ .Values.backend.httpPort }}"
frontend:
hostname: "{{ include "openmatch.frontend.hostName" . }}"
hostname: "{{ .Values.frontend.hostName }}"
grpcport: "{{ .Values.frontend.grpcPort }}"
httpport: "{{ .Values.frontend.httpPort }}"
query:
hostname: "{{ include "openmatch.query.hostName" . }}"
hostname: "{{ .Values.query.hostName }}"
grpcport: "{{ .Values.query.grpcPort }}"
httpport: "{{ .Values.query.httpPort }}"
synchronizer:
hostname: "{{ include "openmatch.synchronizer.hostName" . }}"
hostname: "{{ .Values.synchronizer.hostName }}"
grpcport: "{{ .Values.synchronizer.grpcPort }}"
httpport: "{{ .Values.synchronizer.httpPort }}"
swaggerui:
hostname: "{{ include "openmatch.swaggerui.hostName" . }}"
hostname: "{{ .Values.swaggerui.hostName }}"
httpport: "{{ .Values.swaggerui.httpPort }}"
# Configurations for api.test and api.scale are used for testing.
test:
hostname: "{{ include "openmatch.fullname" . }}-test"
grpcport: "50509"
scale-frontend:
httpport: "51509"
scale:
httpport: "51509"
scale-backend:
httpport: "51510"
{{- if .Values.global.tls.enabled }}
tls:
trustedCertificatePath: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
@ -85,42 +80,39 @@ data:
rootcertificatefile: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
{{- end }}
storage:
ignoreListTTL: {{ index .Values "open-match-core" "ignoreListTTL" }}
page:
size: 10000
redis:
{{- if index .Values "open-match-core" "redis" "enabled" }}
{{- if index .Values "redis" "sentinel" "enabled"}}
sentinelPort: {{ .Values.redis.sentinel.port }}
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
sentinelHostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
{{- else}}
# Open Match's default Redis setups
hostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
{{- if index .Values "open-match-core" "redis" "install" }}
hostname: {{ .Values.redis.fullnameOverride }}-master.{{ .Release.Namespace }}.svc.cluster.local
port: {{ .Values.redis.redisPort }}
user: {{ .Values.redis.user }}
{{- end}}
{{- else }}
# BYO Redis setups
hostname: {{ index .Values "open-match-core" "redis" "hostname" }}
port: {{ index .Values "open-match-core" "redis" "port" }}
user: {{ index .Values "open-match-core" "redis" "user" }}
{{- end }}
usePassword: {{ .Values.redis.usePassword }}
{{- if .Values.redis.usePassword }}
passwordPath: {{ .Values.redis.secretMountPath }}/redis-password
{{- end }}
pool:
maxIdle: {{ index .Values "open-match-core" "redis" "pool" "maxIdle" }}
maxActive: {{ index .Values "open-match-core" "redis" "pool" "maxActive" }}
idleTimeout: {{ index .Values "open-match-core" "redis" "pool" "idleTimeout" }}
healthCheckTimeout: {{ index .Values "open-match-core" "redis" "pool" "healthCheckTimeout" }}
expiration: 43200
telemetry:
reportingPeriod: "{{ .Values.global.telemetry.reportingPeriod }}"
traceSamplingFraction: "{{ .Values.global.telemetry.traceSamplingFraction }}"
zpages:
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
jaeger:
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
agentEndpoint: "{{ tpl .Values.global.telemetry.jaeger.agentEndpoint . }}"
collectorEndpoint: "{{ tpl .Values.global.telemetry.jaeger.collectorEndpoint . }}"
samplerFraction: {{ .Values.global.telemetry.jaeger.samplerFraction }}
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
prometheus:
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.configmap.override" . }}
name: om-configmap-override
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -25,24 +25,12 @@ metadata:
release: {{ .Release.Name }}
data:
matchmaker_config_override.yaml: |-
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: {{ index .Values "open-match-core" "registrationInterval" }}
# Length of time after match function as started before it will be canceled,
# and evaluator call input is EOF.
proposalCollectionInterval: {{ index .Values "open-match-core" "proposalCollectionInterval" }}
# Time after a ticket has been returned from fetch matches (marked as pending)
# before it automatically becomes active again and will be returned by query
# calls.
pendingReleaseTimeout: {{ index .Values "open-match-core" "pendingReleaseTimeout" }}
    # Time after a ticket has been assigned before it is automatically deleted.
assignedDeleteTimeout: {{ index .Values "open-match-core" "assignedDeleteTimeout" }}
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: {{ index .Values "open-match-core" "queryPageSize" }}
api:
evaluator:
hostname: "{{ include "openmatch.evaluator.hostName" . }}"
hostname: "{{ .Values.evaluator.hostName }}"
grpcport: "{{ .Values.evaluator.grpcPort }}"
httpport: "{{ .Values.evaluator.httpPort }}"
synchronizer:
registrationIntervalMs: 250ms
proposalCollectionIntervalMs: 20000ms
{{- end }}

View File

@ -14,11 +14,11 @@
{{- if index .Values "open-match-core" "enabled" }}
{{- if empty .Values.ci }}
# This is the least restricted PSP used to create privileged pods to disable THP in host kernel.
# om-redis-podsecuritypolicy is the least restricted PSP used to create privileged pods to disable THP in host kernel.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
name: om-redis-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
@ -51,11 +51,11 @@ spec:
fsGroup:
rule: 'RunAsAny'
---
# This does not allow creating privileged pods and restricts bound pods to use the specified port ranges.
# om-core-podsecuritypolicy does not allow creating privileged pods and restricts bound pods to use the specified port ranges.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
name: om-core-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8 }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.query.hostName" . }}
- name: {{ .Values.query.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -29,7 +29,7 @@ metadata:
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "openmatch.serviceAccount.name" . }}
name: {{ .Values.global.kubernetes.serviceAccount }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,26 +40,28 @@ automountServiceAccountToken: true
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-service-role
name: om-service-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-service-role to use om-core-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
- om-core-podsecuritypolicy
verbs:
- use
---
# This applies om-service-role to the open-match unprivileged service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-service-role-binding
name: om-service-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -71,32 +73,34 @@ subjects:
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-service-role
name: om-service-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-redis-role
name: om-redis-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-redis-role to use om-redis-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
- om-redis-podsecuritypolicy
verbs:
- use
---
# This applies om-redis-role to the om-redis privileged service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-redis-role-binding
name: om-redis-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -104,10 +108,10 @@ metadata:
release: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ include "call-nested" (list . "redis" "redis.serviceAccountName") }}
name: {{ .Values.redis.serviceAccount.name }} # Redis service account
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-redis-role
name: om-redis-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if index .Values "open-match-core" "swaggerui" "enabled" }}
{{- if index .Values "open-match-core" "enabled" }}
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.swaggerui.hostName" . }}
name: {{ .Values.swaggerui.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -36,7 +36,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.swaggerui.hostName" . }}
name: {{ .Values.swaggerui.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -61,11 +61,11 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.swaggerui.hostName" . }}
- name: {{ .Values.swaggerui.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.synchronizer.hostName" . }}
name: {{ .Values.synchronizer.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,7 +40,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.synchronizer.hostName" . }}
name: {{ .Values.synchronizer.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -66,12 +66,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8 }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.synchronizer.hostName" . }}
- name: {{ .Values.synchronizer.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -1,23 +1,8 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
# This applies om-test-role to the open-match-test-service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-test-role-binding
name: om-test-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -25,11 +10,9 @@ metadata:
release: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ include "openmatch.fullname" . }}-test-service
name: open-match-test-service
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-test-role
name: om-test-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -1,38 +1,23 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-test-role
name: om-test-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-test-role to use om-core-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
- om-core-podsecuritypolicy
verbs:
- use
# Grant this role get & list permission for k8s endpoints and pods resources
# Grant om-test-role get & list permission for k8s endpoints and pods resources
# Required for e2e in-cluster testing.
- apiGroups:
- ""
@ -42,5 +27,3 @@ rules:
verbs:
- get
- list
{{- end }}

View File

@ -1,29 +1,11 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
# Create a service account for test services.
# Create a service account for open-match-test services.
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "openmatch.fullname" . }}-test-service
name: open-match-test-service
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
automountServiceAccountToken: true
{{- end }}

View File

@ -1,83 +1,24 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.fullname" . }}-test
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
component: test
release: {{ .Release.Name }}
spec:
selector:
app: {{ template "openmatch.name" . }}
component: test
release: {{ .Release.Name }}
ports:
- name: grpc
protocol: TCP
port: 50509
- name: http
protocol: TCP
port: 51509
---
apiVersion: v1
kind: Pod
metadata:
name: {{ include "openmatch.fullname" . }}-test
name: om-test
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
"helm.sh/hook": test-success
labels:
app: {{ template "openmatch.name" . }}
component: test
component: om-test
release: {{ .Release.Name }}
spec:
# Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.
activeDeadlineSeconds: 900
serviceAccountName: {{ include "openmatch.fullname" . }}-test-service
serviceAccountName: open-match-test-service
automountServiceAccountToken: true
volumes:
- configMap:
defaultMode: 420
name: {{ include "openmatch.configmap.default" . }}
name: om-config-volume-default
- configMap:
defaultMode: 420
name: {{ include "openmatch.configmap.override" . }}
name: om-config-volume-override
containers:
- name: {{ include "openmatch.fullname" . }}-test
volumeMounts:
- mountPath: /app/config/default
name: om-config-volume-default
- mountPath: /app/config/override
name: om-config-volume-override
image: "{{ .Values.global.image.registry }}/openmatch-base-build:{{ .Values.global.image.tag }}"
ports:
- name: grpc
containerPort: 50509
- name: http
containerPort: 51509
- image: "{{ .Values.global.image.registry }}/openmatch-base-build:{{ .Values.global.image.tag }}"
imagePullPolicy: Always
name: test
name: om-test
resources:
limits:
memory: 800Mi
@ -91,7 +32,7 @@ spec:
command: ["go"]
args:
- "test"
- "./internal/testing/e2e"
- "./test/e2e"
- "-v"
- "-timeout"
- "150s"
@ -99,5 +40,3 @@ spec:
- "-tags"
- "e2ecluster"
restartPolicy: Never
{{- end }}

View File

@ -17,7 +17,7 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "openmatch.fullname" . }}-tls-rootca
name: om-tls-rootca
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -31,9 +31,9 @@ data:
apiVersion: v1
kind: Secret
metadata:
name: {{ include "openmatch.fullname" . }}-tls-server
name: om-tls-server
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
annotations: {{- include "openmatch.chartmeta" . | nindent 2 }}
labels:
app: {{ template "openmatch.name" . }}
component: tls

View File

@ -23,7 +23,7 @@
# Begins the configuration section for `query` component in Open Match.
# query:
#
# # Override the default in-cluster domain name for the `query` service to om-query.
# # Specifies om-query as the in-cluster domain name for the `query` service.
# hostName: om-query
#
# # Specifies the port for receiving RESTful HTTP requests in the `query` service.
@ -44,68 +44,67 @@
# # Specifies the image name to be used in a Kubernetes pod for `query` component.
# image: openmatch-query
swaggerui: &swaggerui
hostName:
hostName: om-swaggerui
httpPort: 51500
portType: ClusterIP
replicas: 1
image: openmatch-swaggerui
query: &query
hostName:
hostName: om-query
grpcPort: 50503
httpPort: 51503
portType: ClusterIP
replicas: 3
image: openmatch-query
frontend: &frontend
hostName:
hostName: om-frontend
grpcPort: 50504
httpPort: 51504
portType: ClusterIP
replicas: 3
image: openmatch-frontend
backend: &backend
hostName:
hostName: om-backend
grpcPort: 50505
httpPort: 51505
portType: ClusterIP
replicas: 3
image: openmatch-backend
synchronizer: &synchronizer
hostName:
hostName: om-synchronizer
grpcPort: 50506
httpPort: 51506
portType: ClusterIP
replicas: 1
image: openmatch-synchronizer
evaluator: &evaluator
hostName:
hostName: om-evaluator
grpcPort: 50508
httpPort: 51508
replicas: 3
function: &function
hostName:
hostName: om-function
grpcPort: 50502
httpPort: 51502
replicas: 3
# Specifies the location and name of the Open Match application-level config volumes.
# Used in template: `openmatch.volumemounts.configs` and `openmatch.volumes.configs` under `templates/_helpers.tpl` file.
# Used in template: `openmatch.volumemounts.configs` and `openmatch.volumes.configs` under `templates/_helpers.tpl` file.
configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: om-configmap-default
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: om-configmap-override
# Override Redis settings
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
redis:
fullnameOverride: om-redis
redisPort: 6379
usePassword: false
usePasswordFile: false
@ -113,10 +112,6 @@ redis:
configmap: |
maxclients 100000
maxmemory 500000000
sentinel:
enabled: true
masterSet: om-redis-master
port: 26379
master:
disableCommands: [] # don't disable 'FLUSH-' commands
resources:
@ -134,6 +129,7 @@ redis:
slaveCount: 3
serviceAccount:
create: true
name: open-match-redis-service
slave:
persistence:
enabled: false
@ -172,42 +168,26 @@ redis:
# Controls if users need to install backend, frontend, query, om-configmap, and swaggerui.
open-match-core:
enabled: true
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after match function as started before it will be canceled,
# and evaluator call input is EOF.
proposalCollectionInterval: 20s
# Time after a ticket has been returned from fetch matches (marked as pending)
# before it automatically becomes active again and will be returned by query
# calls.
pendingReleaseTimeout: 1m
# Time after a ticket has been assigned before it is automatically deleted.
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
ignoreListTTL: 60000ms
redis:
enabled: true
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
install: true
# If open-match-core.redis.install is set to false, have Open Match components talk to this redis address instead.
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
user:
user:
pool:
maxIdle: 500
maxActive: 500
idleTimeout: 0
healthCheckTimeout: 300ms
swaggerui:
enabled: false
# Controls if users need to install scale testing setup for Open Match.
open-match-scale:
# Switch the value between true/false to turn on/off this subchart
enabled: false
frontend: *frontend
backend: *backend
# Controls if users need to install the monitoring tools in Open Match.
open-match-telemetry:
@ -220,6 +200,7 @@ open-match-customize:
enabled: false
evaluator: *evaluator
function: *function
query: *query
# You can override the evaluator/mmf image
# evaluator:
# image: [YOUR_EVALUATOR_IMAGE]
@ -246,8 +227,8 @@ global:
limits:
memory: 3Gi
cpu: 2
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Defines a service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount: open-match-unprivileged-service
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -269,21 +250,21 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 0.0.0-dev
tag: 0.9.0
pullPolicy: Always
# Expose the telemetry configurations to all subcharts because prometheus, for example,
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
telemetry:
reportingPeriod: "1m"
traceSamplingFraction: 0.005 # What fraction of traces to sample.
zpages:
enabled: true
jaeger:
enabled: false
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
samplerFraction: 0.005 # Configures a sampler that samples a given fraction of traces.
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
prometheus:
enabled: false
endpoint: "/metrics"
@ -293,3 +274,4 @@ global:
prefix: "open_match"
grafana:
enabled: false
reportingPeriod: "1m"

View File

@ -23,7 +23,7 @@
# Begins the configuration section for `query` component in Open Match.
# query:
#
# # Override the default in-cluster domain name for the `query` service to om-query.
# # Specifies om-query as the in-cluster domain name for the `query` service.
# hostName: om-query
#
# # Specifies the port for receiving RESTful HTTP requests in the `query` service.
@ -44,46 +44,46 @@
# # Specifies the image name to be used in a Kubernetes pod for `query` component.
# image: openmatch-query
swaggerui: &swaggerui
hostName:
hostName: om-swaggerui
httpPort: 51500
portType: ClusterIP
replicas: 1
image: openmatch-swaggerui
query: &query
hostName:
hostName: om-query
grpcPort: 50503
httpPort: 51503
portType: ClusterIP
replicas: 3
image: openmatch-query
frontend: &frontend
hostName:
hostName: om-frontend
grpcPort: 50504
httpPort: 51504
portType: ClusterIP
replicas: 3
image: openmatch-frontend
backend: &backend
hostName:
hostName: om-backend
grpcPort: 50505
httpPort: 51505
portType: ClusterIP
replicas: 3
image: openmatch-backend
synchronizer: &synchronizer
hostName:
hostName: om-synchronizer
grpcPort: 50506
httpPort: 51506
portType: ClusterIP
replicas: 1
image: openmatch-synchronizer
evaluator: &evaluator
hostName:
hostName: om-evaluator
grpcPort: 50508
httpPort: 51508
replicas: 3
function: &function
hostName:
hostName: om-function
grpcPort: 50502
httpPort: 51502
replicas: 3
@ -94,18 +94,17 @@ configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: om-configmap-default
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: om-configmap-override
# Override Redis settings
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
redis:
fullnameOverride: om-redis
redisPort: 6379
usePassword: false
usePasswordFile: false
@ -113,10 +112,6 @@ redis:
configmap: |
maxclients 100000
maxmemory 300000000
sentinel:
enabled: true
masterSet: om-redis-master
port: 26379
master:
disableCommands: [] # don't disable 'FLUSH-' commands
resources:
@ -129,6 +124,7 @@ redis:
slaveCount: 2
serviceAccount:
create: true
name: open-match-redis-service
sysctlImage:
# Enable this setting in production if you are running Open Match under Linux environment
enabled: false
@ -157,42 +153,26 @@ redis:
# Controls if users need to install backend, frontend, query, om-configmap, and swaggerui.
open-match-core:
enabled: true
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after match function as started before it will be canceled,
# and evaluator call input is EOF.
proposalCollectionInterval: 20s
# Time after a ticket has been returned from fetch matches (marked as pending)
# before it automatically becomes active again and will be returned by query
# calls.
pendingReleaseTimeout: 1m
# Time after a ticket has been assigned before it is automatically deleted.
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
ignoreListTTL: 60000ms
redis:
enabled: true
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
install: true
# If open-match-core.redis.install is set to false, have Open Match components talk to this redis address instead.
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
user:
user:
pool:
maxIdle: 200
maxActive: 0
idleTimeout: 0
healthCheckTimeout: 300ms
swaggerui:
enabled: true
# Controls if users need to install scale testing setup for Open Match.
open-match-scale:
# Switch the value between true/false to turn on/off this subchart
enabled: false
frontend: *frontend
backend: *backend
# Controls if users need to install the monitoring tools in Open Match.
open-match-telemetry:
@ -205,6 +185,7 @@ open-match-customize:
enabled: false
evaluator: *evaluator
function: *function
query: *query
# You can override the evaluator/mmf image
# evaluator:
# image: [YOUR_EVALUATOR_IMAGE]
@ -231,8 +212,8 @@ global:
limits:
memory: 100Mi
cpu: 100m
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Defines a service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount: open-match-unprivileged-service
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -254,21 +235,21 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 1.1.0
tag: 0.9.0
pullPolicy: Always
# Expose the telemetry configurations to all subcharts because prometheus, for example,
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
telemetry:
reportingPeriod: "1m"
traceSamplingFraction: 0.01 # What fraction of traces to sample.
zpages:
enabled: true
jaeger:
enabled: false
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
samplerFraction: 0.01 # Configures a sampler that samples a given fraction of traces.
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
prometheus:
enabled: false
endpoint: "/metrics"
@ -278,5 +259,4 @@ global:
prefix: "open_match"
grafana:
enabled: false
# This will be called with `tpl` in the open-match-telemetry subchart namespace.
prometheusServer: 'http://{{ include "call-nested" (list . "prometheus" "prometheus.server.fullname") }}.{{ .Release.Namespace }}.svc.cluster.local:80/'
reportingPeriod: "1m"

55
internal/app/appmain.go Normal file
View File

@ -0,0 +1,55 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package app contains the common application initialization code for Open Match servers.
package app
import (
"github.com/sirupsen/logrus"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/logging"
"open-match.dev/open-match/internal/rpc"
)
var (
logger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "app.main",
})
)
// RunApplication creates and serves an Open Match server component.
//
// serverName selects the component's config section ("api."+serverName) and
// is used in diagnostics. getCfg supplies the configuration view; bindService
// registers the component's gRPC/HTTP handlers on the serving harness.
//
// Every setup failure is fatal: logrus Fatalf terminates the process, so this
// function either serves forever or exits.
func RunApplication(serverName string, getCfg func() (config.View, error), bindService func(*rpc.ServerParams, config.View) error) {
	cfg, err := getCfg()
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatalf("cannot read configuration.")
	}
	// Configure logging before any further setup so subsequent errors are
	// reported with the configured level/format.
	logging.ConfigureLogging(cfg)
	p, err := rpc.NewServerParamsFromConfig(cfg, "api."+serverName)
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatalf("cannot construct server.")
	}
	// Let the component register its service handlers on the harness.
	if err := bindService(p, cfg); err != nil {
		logger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatalf("failed to bind %s service.", serverName)
	}
	// Blocks for the lifetime of the process; panics/exits on serve failure
	// (Must-style helper).
	rpc.MustServeForever(p)
}

View File

@ -15,81 +15,25 @@
package backend
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
totalBytesPerMatch = stats.Int64("open-match.dev/backend/total_bytes_per_match", "Total bytes per match", stats.UnitBytes)
ticketsPerMatch = stats.Int64("open-match.dev/backend/tickets_per_match", "Number of tickets per match", stats.UnitDimensionless)
ticketsReleased = stats.Int64("open-match.dev/backend/tickets_released", "Number of tickets released per request", stats.UnitDimensionless)
ticketsAssigned = stats.Int64("open-match.dev/backend/tickets_assigned", "Number of tickets assigned per request", stats.UnitDimensionless)
ticketsTimeToAssignment = stats.Int64("open-match.dev/backend/ticket_time_to_assignment", "Time to assignment for tickets", stats.UnitMilliseconds)
totalMatchesView = &view.View{
Measure: totalBytesPerMatch,
Name: "open-match.dev/backend/total_matches",
Description: "Total number of matches",
Aggregation: view.Count(),
}
totalBytesPerMatchView = &view.View{
Measure: totalBytesPerMatch,
Name: "open-match.dev/backend/total_bytes_per_match",
Description: "Total bytes per match",
Aggregation: telemetry.DefaultBytesDistribution,
}
ticketsPerMatchView = &view.View{
Measure: ticketsPerMatch,
Name: "open-match.dev/backend/tickets_per_match",
Description: "Tickets per ticket",
Aggregation: telemetry.DefaultCountDistribution,
}
ticketsAssignedView = &view.View{
Measure: ticketsAssigned,
Name: "open-match.dev/backend/tickets_assigned",
Description: "Number of tickets assigned per request",
Aggregation: view.Sum(),
}
ticketsReleasedView = &view.View{
Measure: ticketsReleased,
Name: "open-match.dev/backend/tickets_released",
Description: "Number of tickets released per request",
Aggregation: view.Sum(),
}
ticketsTimeToAssignmentView = &view.View{
Measure: ticketsTimeToAssignment,
Name: "open-match.dev/backend/ticket_time_to_assignment",
Description: "Time to assignment for tickets",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
)
// BindService creates the backend service and binds it to the serving harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
func BindService(p *rpc.ServerParams, cfg config.View) error {
service := &backendService{
synchronizer: newSynchronizerClient(p.Config()),
store: statestore.New(p.Config()),
cc: rpc.NewClientCache(p.Config()),
synchronizer: newSynchronizerClient(cfg),
store: statestore.New(cfg),
cc: rpc.NewClientCache(cfg),
}
b.AddHealthCheckFunc(service.store.HealthCheck)
b.AddHandleFunc(func(s *grpc.Server) {
p.AddHealthCheckFunc(service.store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterBackendServiceServer(s, service)
}, pb.RegisterBackendServiceHandlerFromEndpoint)
b.RegisterViews(
totalMatchesView,
totalBytesPerMatchView,
ticketsPerMatchView,
ticketsAssignedView,
ticketsReleasedView,
ticketsTimeToAssignmentView,
)
return nil
}

View File

@ -22,23 +22,17 @@ import (
"net/http"
"strings"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain/contextcause"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/omerror"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
@ -55,6 +49,10 @@ var (
"app": "openmatch",
"component": "app.backend",
})
mMatchesFetched = telemetry.Counter("backend/matches_fetched", "matches fetched")
mMatchesSentToEvaluation = telemetry.Counter("backend/matches_sent_to_evaluation", "matches sent to evaluation")
mTicketsAssigned = telemetry.Counter("backend/tickets_assigned", "tickets assigned")
mTicketsReleased = telemetry.Counter("backend/tickets_released", "tickets released")
)
// FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile
@ -62,50 +60,56 @@ var (
// FetchMatches immediately returns an error if it encounters any execution failures.
// - If the synchronizer is enabled, FetchMatch will then call the synchronizer to deduplicate proposals with overlapped tickets.
func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.BackendService_FetchMatchesServer) error {
if req.Config == nil {
if req.GetConfig() == nil {
return status.Error(codes.InvalidArgument, ".config is required")
}
if req.Profile == nil {
if req.GetProfile() == nil {
return status.Error(codes.InvalidArgument, ".profile is required")
}
// Error group for handling the synchronizer calls only.
eg, ctx := errgroup.WithContext(stream.Context())
syncStream, err := s.synchronizer.synchronize(ctx)
syncStream, err := s.synchronizer.synchronize(stream.Context())
if err != nil {
return err
}
// The mmf must be canceled if the synchronizer call fails (which will
// cancel the context from the error group). However the synchronizer call
// is NOT dependant on the mmf call.
mmfCtx, cancelMmfs := contextcause.WithCancelCause(ctx)
mmfCtx, cancelMmfs := context.WithCancel(stream.Context())
// Closed when mmfs should start.
startMmfs := make(chan struct{})
proposals := make(chan *pb.Match)
m := &sync.Map{}
eg.Go(func() error {
return synchronizeSend(ctx, syncStream, m, proposals)
})
eg.Go(func() error {
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs)
synchronizerWait := omerror.WaitOnErrors(logger, func() error {
return synchronizeSend(stream.Context(), syncStream, m, proposals)
}, func() error {
return synchronizeRecv(syncStream, m, stream, startMmfs, cancelMmfs)
})
var mmfErr error
select {
case <-mmfCtx.Done():
mmfErr = fmt.Errorf("mmf was never started")
case <-startMmfs:
mmfErr = callMmf(mmfCtx, s.cc, req, proposals)
}
mmfWait := omerror.WaitOnErrors(logger, func() error {
select {
case <-mmfCtx.Done():
return fmt.Errorf("Mmf was never started")
case <-startMmfs:
}
syncErr := eg.Wait()
return callMmf(mmfCtx, s.cc, req, proposals)
})
syncErr := synchronizerWait()
// Fetch Matches should never block on just the match function.
// Must cancel mmfs after synchronizer is done and before checking mmf error
// because the synchronizer call could fail while the mmf call blocks.
cancelMmfs()
mmfErr := mmfWait()
// TODO: Send mmf error in FetchSummary instead of erroring call.
if syncErr != nil || mmfErr != nil {
logger.WithFields(logrus.Fields{
"syncErr": syncErr,
"mmfErr": mmfErr,
}).Error("error(s) in FetchMatches call.")
return fmt.Errorf(
"error(s) in FetchMatches call. syncErr=[%v], mmfErr=[%v]",
"Error(s) in FetchMatches call. syncErr=[%s], mmfErr=[%s]",
syncErr,
mmfErr,
)
@ -124,10 +128,8 @@ sendProposals:
if !ok {
break sendProposals
}
_, loaded := m.LoadOrStore(p.GetMatchId(), p)
if loaded {
return fmt.Errorf("MatchMakingFunction returned same match_id twice: \"%s\"", p.GetMatchId())
}
m.Store(p.GetMatchId(), p)
telemetry.RecordUnitMeasurement(ctx, mMatchesSentToEvaluation)
err := syncStream.Send(&ipb.SynchronizeRequest{Proposal: p})
if err != nil {
return fmt.Errorf("error sending proposal to synchronizer: %w", err)
@ -142,7 +144,7 @@ sendProposals:
return nil
}
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc) error {
func synchronizeRecv(syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs context.CancelFunc) error {
var startMmfsOnce sync.Once
for {
@ -161,17 +163,12 @@ func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync
}
if resp.CancelMmfs {
cancelMmfs(errors.New("match function ran longer than proposal window, canceling"))
cancelMmfs()
}
if v, ok := m.Load(resp.GetMatchId()); ok {
match, ok := v.(*pb.Match)
if !ok {
return fmt.Errorf("error casting sync map value into *pb.Match: %w", err)
}
stats.Record(ctx, totalBytesPerMatch.M(int64(proto.Size(match))))
stats.Record(ctx, ticketsPerMatch.M(int64(len(match.GetTickets()))))
err = stream.Send(&pb.FetchMatchesResponse{Match: match})
if match, ok := m.Load(resp.GetMatchId()); ok {
telemetry.RecordUnitMeasurement(stream.Context(), mMatchesFetched)
err = stream.Send(&pb.FetchMatchesResponse{Match: match.(*pb.Match)})
if err != nil {
return fmt.Errorf("error sending match to caller of backend: %w", err)
}
@ -198,17 +195,17 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
var conn *grpc.ClientConn
conn, err := cc.GetGRPC(address)
if err != nil {
return status.Error(codes.InvalidArgument, "failed to establish grpc client connection to match function")
logger.WithFields(logrus.Fields{
"error": err.Error(),
"function": address,
}).Error("failed to establish grpc client connection to match function")
return status.Error(codes.InvalidArgument, "failed to connect to match function")
}
client := pb.NewMatchFunctionClient(conn)
stream, err := client.Run(ctx, &pb.RunRequest{Profile: profile})
if err != nil {
err = errors.Wrap(err, "failed to run match function for profile")
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
}
logger.WithError(err).Error("failed to run match function for profile")
return err
}
@ -218,11 +215,7 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
break
}
if err != nil {
err = errors.Wrapf(err, "%v.Run() error, %v", client, err)
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
}
logger.Errorf("%v.Run() error, %v\n", client, err)
return err
}
select {
@ -238,8 +231,11 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProfile, address string, proposals chan<- *pb.Match) error {
client, baseURL, err := cc.GetHTTP(address)
if err != nil {
err = errors.Wrapf(err, "failed to establish rest client connection to match function: %s", address)
return status.Error(codes.InvalidArgument, err.Error())
logger.WithFields(logrus.Fields{
"error": err.Error(),
"function": address,
}).Error("failed to establish rest client connection to match function")
return status.Error(codes.InvalidArgument, "failed to connect to match function")
}
var m jsonpb.Marshaler
@ -255,7 +251,7 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return status.Errorf(codes.Internal, "failed to get response from mmf run for profile %s: %s", profile.Name, err.Error())
return status.Errorf(codes.Internal, "failed to get response from mmf run for proile %s: %s", profile.Name, err.Error())
}
defer func() {
err = resp.Body.Close()
@ -296,60 +292,35 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
}
func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTicketsRequest) (*pb.ReleaseTicketsResponse, error) {
err := s.store.DeleteTicketsFromPendingRelease(ctx, req.GetTicketIds())
err := doReleasetickets(ctx, req, s.store)
if err != nil {
err = errors.Wrap(err, "failed to remove the awaiting tickets from the pending release for requested tickets")
logger.WithError(err).Error("failed to remove the awaiting tickets from the ignore list for requested tickets")
return nil, err
}
stats.Record(ctx, ticketsReleased.M(int64(len(req.TicketIds))))
telemetry.RecordNUnitMeasurement(ctx, mTicketsReleased, int64(len(req.TicketIds)))
return &pb.ReleaseTicketsResponse{}, nil
}
func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseAllTicketsRequest) (*pb.ReleaseAllTicketsResponse, error) {
err := s.store.ReleaseAllTickets(ctx)
if err != nil {
return nil, err
}
return &pb.ReleaseAllTicketsResponse{}, nil
}
// AssignTickets overwrites the Assignment field of the input TicketIds.
func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
resp, err := doAssignTickets(ctx, req, s.store)
err := doAssignTickets(ctx, req, s.store)
if err != nil {
logger.WithError(err).Error("failed to update assignments for requested tickets")
return nil, err
}
numIds := 0
for _, ag := range req.Assignments {
numIds += len(ag.TicketIds)
}
stats.Record(ctx, ticketsAssigned.M(int64(numIds)))
return resp, nil
telemetry.RecordNUnitMeasurement(ctx, mTicketsAssigned, int64(len(req.TicketIds)))
return &pb.AssignTicketsResponse{}, nil
}
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
resp, tickets, err := store.UpdateAssignments(ctx, req)
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) error {
err := store.UpdateAssignments(ctx, req.GetTicketIds(), req.GetAssignment())
if err != nil {
return nil, err
logger.WithError(err).Error("failed to update assignments")
return err
}
for _, ticket := range tickets {
err = recordTimeToAssignment(ctx, ticket)
if err != nil {
logger.WithError(err).Errorf("failed to record time to assignment for ticket %s", ticket.Id)
}
}
ids := []string{}
for _, ag := range req.Assignments {
ids = append(ids, ag.TicketIds...)
}
for _, id := range ids {
for _, id := range req.GetTicketIds() {
err = store.DeindexTicket(ctx, id)
// Try to deindex all input tickets. Log without returning an error if the deindexing operation failed.
// TODO: consider retry the index operation
@ -358,27 +329,23 @@ func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store st
}
}
if err = store.DeleteTicketsFromPendingRelease(ctx, ids); err != nil {
if err = store.DeleteTicketsFromIgnoreList(ctx, req.GetTicketIds()); err != nil {
logger.WithFields(logrus.Fields{
"ticket_ids": ids,
"ticket_ids": req.GetTicketIds(),
}).Error(err)
}
return resp, nil
return nil
}
func recordTimeToAssignment(ctx context.Context, ticket *pb.Ticket) error {
if ticket.Assignment == nil {
return fmt.Errorf("assignment for ticket %s is nil", ticket.Id)
}
now := time.Now()
created, err := ptypes.Timestamp(ticket.CreateTime)
func doReleasetickets(ctx context.Context, req *pb.ReleaseTicketsRequest, store statestore.Service) error {
err := store.DeleteTicketsFromIgnoreList(ctx, req.GetTicketIds())
if err != nil {
logger.WithFields(logrus.Fields{
"ticket_ids": req.GetTicketIds(),
}).WithError(err).Error("failed to delete the tickets from the ignore list")
return err
}
stats.Record(ctx, ticketsTimeToAssignment.M(now.Sub(created).Milliseconds()))
return nil
}

View File

@ -0,0 +1,309 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package backend
import (
"context"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/statestore"
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
)
func TestDoReleaseTickets(t *testing.T) {
fakeProperty := "test-property"
fakeTickets := []*pb.Ticket{
{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 1,
},
},
},
{
Id: "2",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 2,
},
},
},
{
Id: "3",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 2,
},
},
},
}
tests := []struct {
description string
preAction func(context.Context, context.CancelFunc, statestore.Service, *pb.Pool)
req *pb.ReleaseTicketsRequest
wantCode codes.Code
pool *pb.Pool
expectTickets []string
}{
{
description: "expect unavailable code since context is canceled before being called",
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service, pool *pb.Pool) {
cancel()
},
req: &pb.ReleaseTicketsRequest{
TicketIds: []string{"1"},
},
wantCode: codes.Unavailable,
},
{
description: "expect ok code when submitted list is empty",
pool: &pb.Pool{DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}}},
expectTickets: []string{"3"},
req: &pb.ReleaseTicketsRequest{
TicketIds: []string{},
},
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service, pool *pb.Pool) {
for _, fakeTicket := range fakeTickets {
store.CreateTicket(ctx, fakeTicket)
store.IndexTicket(ctx, fakeTicket)
}
// Make sure tickets are correctly indexed.
var wantFilteredTickets []*pb.Ticket
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
wantFilteredTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
// Ignore a few tickets
err = store.AddTicketsToIgnoreList(ctx, []string{"1", "2"})
assert.Nil(t, err)
// Make sure it was properly ignored
var ignoredFilterTickets []*pb.Ticket
err = store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
ignoredFilterTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets)-2, len(ignoredFilterTickets))
},
wantCode: codes.OK,
},
{
description: "expect ok code",
pool: &pb.Pool{DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}}},
wantCode: codes.OK,
expectTickets: []string{"1", "2"},
req: &pb.ReleaseTicketsRequest{
TicketIds: []string{"1", "2"},
},
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service, pool *pb.Pool) {
for _, fakeTicket := range fakeTickets {
store.CreateTicket(ctx, fakeTicket)
store.IndexTicket(ctx, fakeTicket)
}
// Make sure tickets are correctly indexed.
var wantFilteredTickets []*pb.Ticket
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
wantFilteredTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
// Ignore all the tickets
err = store.AddTicketsToIgnoreList(ctx, []string{"1", "2", "3"})
assert.Nil(t, err)
// Make sure it was properly ignored
var ignoredFilterTickets []*pb.Ticket
err = store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
ignoredFilterTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets)-3, len(ignoredFilterTickets))
},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
test.preAction(ctx, cancel, store, test.pool)
err := doReleasetickets(ctx, test.req, store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
// Make sure that the expected tickets are available for query
var filteredTickets []*pb.Ticket
err = store.FilterTickets(ctx, test.pool, 10, func(filterTickets []*pb.Ticket) error {
filteredTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(filteredTickets), len(test.expectTickets))
for _, ticket := range filteredTickets {
assert.Contains(t, test.expectTickets, ticket.GetId())
}
}
})
}
}
// TestDoAssignTickets covers doAssignTickets behavior:
//   - a canceled context surfaces codes.Unavailable
//   - a nil assignment surfaces codes.InvalidArgument
//   - assigning tickets that do not exist surfaces codes.NotFound
//   - on success, each requested ticket carries the assignment and is
//     deindexed from the matchmaking pool.
func TestDoAssignTickets(t *testing.T) {
	fakeProperty := "test-property"
	fakeTickets := []*pb.Ticket{
		{
			Id: "1",
			SearchFields: &pb.SearchFields{
				DoubleArgs: map[string]float64{
					fakeProperty: 1,
				},
			},
		},
		{
			Id: "2",
			SearchFields: &pb.SearchFields{
				DoubleArgs: map[string]float64{
					fakeProperty: 2,
				},
			},
		},
	}

	tests := []struct {
		description    string
		preAction      func(context.Context, context.CancelFunc, statestore.Service)
		req            *pb.AssignTicketsRequest
		wantCode       codes.Code
		wantAssignment *pb.Assignment
	}{
		{
			description: "expect unavailable code since context is canceled before being called",
			preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
				cancel()
			},
			req: &pb.AssignTicketsRequest{
				TicketIds:  []string{"1"},
				Assignment: &pb.Assignment{},
			},
			wantCode: codes.Unavailable,
		},
		{
			description: "expect invalid argument code since assignment is nil",
			preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
				cancel()
			},
			req:      &pb.AssignTicketsRequest{},
			wantCode: codes.InvalidArgument,
		},
		{
			description: "expect not found code since ticket does not exist",
			preAction:   func(_ context.Context, _ context.CancelFunc, _ statestore.Service) {},
			req: &pb.AssignTicketsRequest{
				TicketIds: []string{"1", "2"},
				Assignment: &pb.Assignment{
					Connection: "123",
				},
			},
			wantCode: codes.NotFound,
		},
		{
			description: "expect ok code",
			preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service) {
				for _, fakeTicket := range fakeTickets {
					// Creation/indexing must succeed for the test to be valid;
					// fail fast instead of silently ignoring the error.
					assert.Nil(t, store.CreateTicket(ctx, fakeTicket))
					assert.Nil(t, store.IndexTicket(ctx, fakeTicket))
				}
				// Make sure tickets are correctly indexed.
				var wantFilteredTickets []*pb.Ticket
				pool := &pb.Pool{
					DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}},
				}
				err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
					wantFilteredTickets = filterTickets
					return nil
				})
				assert.Nil(t, err)
				assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
			},
			req: &pb.AssignTicketsRequest{
				TicketIds: []string{"1", "2"},
				Assignment: &pb.Assignment{
					Connection: "123",
				},
			},
			wantCode: codes.OK,
			wantAssignment: &pb.Assignment{
				Connection: "123",
			},
		},
	}

	for _, test := range tests {
		test := test // capture range variable for the subtest closure
		t.Run(test.description, func(t *testing.T) {
			ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
			cfg := viper.New()
			store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
			defer closer()

			test.preAction(ctx, cancel, store)

			err := doAssignTickets(ctx, test.req, store)
			assert.Equal(t, test.wantCode, status.Convert(err).Code())

			if err == nil {
				// Every assigned ticket must carry the expected assignment.
				for _, id := range test.req.GetTicketIds() {
					ticket, err := store.GetTicket(ctx, id)
					assert.Nil(t, err)
					assert.Equal(t, test.wantAssignment, ticket.GetAssignment())
				}

				// Make sure tickets are deindexed after assignment: a filter
				// that previously matched them must now return nothing.
				var gotFilteredTickets []*pb.Ticket
				pool := &pb.Pool{
					DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 2}},
				}
				assert.Nil(t, store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
					gotFilteredTickets = filterTickets
					return nil
				}))
				assert.Nil(t, gotFilteredTickets)
			}
		})
	}
}
// TODOs: add unit tests to doFetchMatchesFilterSkiplistIds and doFetchMatchesAddSkiplistIds

View File

@ -1,57 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package evaluator provides the Evaluator service for Open Match golang harness.
package evaluator
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
matchesPerEvaluateRequest = stats.Int64("open-match.dev/evaluator/matches_per_request", "Number of matches sent to the evaluator per request", stats.UnitDimensionless)
matchesPerEvaluateResponse = stats.Int64("open-match.dev/evaluator/matches_per_response", "Number of matches returned by the evaluator per response", stats.UnitDimensionless)
matchesPerEvaluateRequestView = &view.View{
Measure: matchesPerEvaluateRequest,
Name: "open-match.dev/evaluator/matches_per_request",
Description: "Number of matches sent to the evaluator per request",
Aggregation: telemetry.DefaultCountDistribution,
}
matchesPerEvaluateResponseView = &view.View{
Measure: matchesPerEvaluateResponse,
Name: "open-match.dev/evaluator/matches_per_response",
Description: "Number of matches sent to the evaluator per response",
Aggregation: telemetry.DefaultCountDistribution,
}
)
// BindServiceFor creates the evaluator service and binds it to the serving harness.
// The returned appmain.Bind registers the gRPC service (and its grpc-gateway
// HTTP endpoint) that wraps the user-supplied Evaluator function, and wires
// the evaluator's telemetry views into the metrics pipeline.
func BindServiceFor(eval Evaluator) appmain.Bind {
	return func(p *appmain.Params, b *appmain.Bindings) error {
		// Serve the evaluator over gRPC and expose the same API through the
		// grpc-gateway HTTP proxy endpoint.
		b.AddHandleFunc(func(s *grpc.Server) {
			pb.RegisterEvaluatorServer(s, &evaluatorService{eval})
		}, pb.RegisterEvaluatorHandlerFromEndpoint)
		// Export the per-request and per-response match-count distributions.
		b.RegisterViews(
			matchesPerEvaluateRequestView,
			matchesPerEvaluateResponseView,
		)
		return nil
	}
}

View File

@ -15,47 +15,24 @@
package frontend
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerTicketView = &view.View{
Measure: totalBytesPerTicket,
Name: "open-match.dev/frontend/total_bytes_per_ticket",
Description: "Total bytes per ticket",
Aggregation: telemetry.DefaultBytesDistribution,
}
searchFieldsPerTicketView = &view.View{
Measure: searchFieldsPerTicket,
Name: "open-match.dev/frontend/searchfields_per_ticket",
Description: "SearchFields per ticket",
Aggregation: telemetry.DefaultCountDistribution,
}
)
// BindService creates the frontend service and binds it to the serving harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
func BindService(p *rpc.ServerParams, cfg config.View) error {
service := &frontendService{
cfg: p.Config(),
store: statestore.New(p.Config()),
cfg: cfg,
store: statestore.New(cfg),
}
b.AddHealthCheckFunc(service.store.HealthCheck)
b.AddHandleFunc(func(s *grpc.Server) {
p.AddHealthCheckFunc(service.store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterFrontendServiceServer(s, service)
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
b.RegisterViews(
totalBytesPerTicketView,
searchFieldsPerTicketView,
)
return nil
}

View File

@ -18,16 +18,14 @@ import (
"context"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
"github.com/rs/xid"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
@ -43,28 +41,26 @@ var (
"app": "openmatch",
"component": "app.frontend",
})
mTicketsCreated = telemetry.Counter("frontend/tickets_created", "tickets created")
mTicketsDeleted = telemetry.Counter("frontend/tickets_deleted", "tickets deleted")
mTicketsRetrieved = telemetry.Counter("frontend/tickets_retrieved", "tickets retrieved")
mTicketAssignmentsRetrieved = telemetry.Counter("frontend/tickets_assignments_retrieved", "ticket assignments retrieved")
)
// CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.
// A ticket is considered as ready for matchmaking once it is created.
// - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
// - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.
func (s *frontendService) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.Ticket, error) {
func (s *frontendService) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.CreateTicketResponse, error) {
// Perform input validation.
if req.Ticket == nil {
if req.GetTicket() == nil {
return nil, status.Errorf(codes.InvalidArgument, ".ticket is required")
}
if req.Ticket.Assignment != nil {
return nil, status.Errorf(codes.InvalidArgument, "tickets cannot be created with an assignment")
}
if req.Ticket.CreateTime != nil {
return nil, status.Errorf(codes.InvalidArgument, "tickets cannot be created with create time set")
}
return doCreateTicket(ctx, req, s.store)
}
func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store statestore.Service) (*pb.Ticket, error) {
func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store statestore.Service) (*pb.CreateTicketResponse, error) {
// Generate a ticket id and create a Ticket in state storage
ticket, ok := proto.Clone(req.Ticket).(*pb.Ticket)
if !ok {
@ -72,44 +68,49 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
}
ticket.Id = xid.New().String()
ticket.CreateTime = ptypes.TimestampNow()
sfCount := 0
sfCount += len(ticket.GetSearchFields().GetDoubleArgs())
sfCount += len(ticket.GetSearchFields().GetStringArgs())
sfCount += len(ticket.GetSearchFields().GetTags())
stats.Record(ctx, searchFieldsPerTicket.M(int64(sfCount)))
stats.Record(ctx, totalBytesPerTicket.M(int64(proto.Size(ticket))))
err := store.CreateTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"ticket": ticket,
}).Error("failed to create the ticket")
return nil, err
}
err = store.IndexTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"ticket": ticket,
}).Error("failed to index the ticket")
return nil, err
}
return ticket, nil
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
return &pb.CreateTicketResponse{Ticket: ticket}, nil
}
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*empty.Empty, error) {
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*pb.DeleteTicketResponse, error) {
err := doDeleteTicket(ctx, req.GetTicketId(), s.store)
if err != nil {
return nil, err
}
return &empty.Empty{}, nil
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
return &pb.DeleteTicketResponse{}, nil
}
func doDeleteTicket(ctx context.Context, id string, store statestore.Service) error {
// Deindex this Ticket to remove it from matchmaking pool.
err := store.DeindexTicket(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to deindex the ticket")
return err
}
@ -125,12 +126,12 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
"id": id,
}).Error("failed to delete the ticket")
}
err = store.DeleteTicketsFromPendingRelease(ctx, []string{id})
err = store.DeleteTicketsFromIgnoreList(ctx, []string{id})
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to delete the ticket from pendingRelease")
}).Error("failed to delete the ticket from ignorelist")
}
// TODO: If other redis queues are implemented or we have custom index fields
// created by Open Match, those need to be cleaned up here.
@ -140,12 +141,26 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
// GetTicket get the Ticket associated with the specified TicketId.
func (s *frontendService) GetTicket(ctx context.Context, req *pb.GetTicketRequest) (*pb.Ticket, error) {
return s.store.GetTicket(ctx, req.GetTicketId())
telemetry.RecordUnitMeasurement(ctx, mTicketsRetrieved)
return doGetTickets(ctx, req.GetTicketId(), s.store)
}
// WatchAssignments stream back Assignment of the specified TicketId if it is updated.
func doGetTickets(ctx context.Context, id string, store statestore.Service) (*pb.Ticket, error) {
ticket, err := store.GetTicket(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to get the ticket")
return nil, err
}
return ticket, nil
}
// GetAssignments stream back Assignment of the specified TicketId if it is updated.
// - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
func (s *frontendService) WatchAssignments(req *pb.WatchAssignmentsRequest, stream pb.FrontendService_WatchAssignmentsServer) error {
func (s *frontendService) GetAssignments(req *pb.GetAssignmentsRequest, stream pb.FrontendService_GetAssignmentsServer) error {
ctx := stream.Context()
for {
select {
@ -153,14 +168,15 @@ func (s *frontendService) WatchAssignments(req *pb.WatchAssignmentsRequest, stre
return ctx.Err()
default:
sender := func(assignment *pb.Assignment) error {
return stream.Send(&pb.WatchAssignmentsResponse{Assignment: assignment})
telemetry.RecordUnitMeasurement(ctx, mTicketAssignmentsRetrieved)
return stream.Send(&pb.GetAssignmentsResponse{Assignment: assignment})
}
return doWatchAssignments(ctx, req.GetTicketId(), sender, s.store)
return doGetAssignments(ctx, req.GetTicketId(), sender, s.store)
}
}
}
func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignment) error, store statestore.Service) error {
func doGetAssignments(ctx context.Context, id string, sender func(*pb.Assignment) error, store statestore.Service) error {
var currAssignment *pb.Assignment
var ok bool
callback := func(assignment *pb.Assignment) error {
@ -172,6 +188,7 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
err := sender(currAssignment)
if err != nil {
logger.WithError(err).Error("failed to send Redis response to grpc server")
return status.Errorf(codes.Aborted, err.Error())
}
}

View File

@ -23,7 +23,7 @@ import (
"time"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/statestore"
@ -68,7 +68,6 @@ func TestDoCreateTickets(t *testing.T) {
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
@ -77,18 +76,18 @@ func TestDoCreateTickets(t *testing.T) {
test.preAction(cancel)
res, err := doCreateTicket(ctx, &pb.CreateTicketRequest{Ticket: test.ticket}, store)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetId())
require.True(t, matched)
require.Nil(t, err)
require.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetTicket().GetId())
assert.True(t, matched)
assert.Nil(t, err)
assert.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.Ticket.SearchFields.DoubleArgs["test-arg"])
}
})
}
}
func TestDoWatchAssignments(t *testing.T) {
func TestDoGetAssignments(t *testing.T) {
testTicket := &pb.Ticket{
Id: "test-id",
}
@ -118,20 +117,12 @@ func TestDoWatchAssignments(t *testing.T) {
{
description: "expect two assignment reads from preAction writes and fail in grpc aborted code",
preAction: func(ctx context.Context, t *testing.T, store statestore.Service, wantAssignments []*pb.Assignment, wg *sync.WaitGroup) {
require.Nil(t, store.CreateTicket(ctx, testTicket))
assert.Nil(t, store.CreateTicket(ctx, testTicket))
go func(wg *sync.WaitGroup) {
for i := 0; i < len(wantAssignments); i++ {
time.Sleep(50 * time.Millisecond)
_, _, err := store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{testTicket.GetId()},
Assignment: wantAssignments[i],
},
},
})
require.Nil(t, err)
assert.Nil(t, store.UpdateAssignments(ctx, []string{testTicket.GetId()}, wantAssignments[i]))
wg.Done()
}
}(wg)
@ -154,12 +145,12 @@ func TestDoWatchAssignments(t *testing.T) {
gotAssignments := []*pb.Assignment{}
test.preAction(ctx, t, store, test.wantAssignments, &wg)
err := doWatchAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
err := doGetAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
wg.Wait()
for i := 0; i < len(gotAssignments); i++ {
require.Equal(t, gotAssignments[i], test.wantAssignments[i])
assert.Equal(t, gotAssignments[i], test.wantAssignments[i])
}
})
}
@ -202,7 +193,6 @@ func TestDoDeleteTicket(t *testing.T) {
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
@ -211,7 +201,7 @@ func TestDoDeleteTicket(t *testing.T) {
test.preAction(ctx, cancel, store)
err := doDeleteTicket(ctx, fakeTicket.GetId(), store)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
assert.Equal(t, test.wantCode, status.Convert(err).Code())
})
}
}
@ -256,7 +246,6 @@ func TestDoGetTicket(t *testing.T) {
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
@ -264,12 +253,12 @@ func TestDoGetTicket(t *testing.T) {
test.preAction(ctx, cancel, store)
ticket, err := store.GetTicket(ctx, fakeTicket.GetId())
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
ticket, err := doGetTickets(ctx, fakeTicket.GetId(), store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
require.Equal(t, test.wantTicket.GetId(), ticket.GetId())
require.Equal(t, test.wantTicket.SearchFields.DoubleArgs, ticket.SearchFields.DoubleArgs)
assert.Equal(t, test.wantTicket.GetId(), ticket.GetId())
assert.Equal(t, test.wantTicket.SearchFields.DoubleArgs, ticket.SearchFields.DoubleArgs)
}
})
}

View File

@ -19,24 +19,25 @@ import (
"open-match.dev/open-match/internal/app/frontend"
"open-match.dev/open-match/internal/app/query"
"open-match.dev/open-match/internal/app/synchronizer"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
)
// BindService creates the minimatch service to the server Params.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
if err := backend.BindService(p, b); err != nil {
func BindService(p *rpc.ServerParams, cfg config.View) error {
if err := backend.BindService(p, cfg); err != nil {
return err
}
if err := frontend.BindService(p, b); err != nil {
if err := frontend.BindService(p, cfg); err != nil {
return err
}
if err := query.BindService(p, b); err != nil {
if err := query.BindService(p, cfg); err != nil {
return err
}
if err := synchronizer.BindService(p, b); err != nil {
if err := synchronizer.BindService(p, cfg); err != nil {
return err
}

View File

@ -15,76 +15,25 @@
package query
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
var (
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of tickets query service cached", stats.UnitDimensionless)
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
ticketsPerQueryView = &view.View{
Measure: ticketsPerQuery,
Name: "open-match.dev/query/tickets_per_query",
Description: "Tickets per query",
Aggregation: telemetry.DefaultCountDistribution,
}
cacheTotalItemsView = &view.View{
Measure: cacheTotalItems,
Name: "open-match.dev/query/total_cached_items",
Description: "Total number of cached tickets",
Aggregation: view.LastValue(),
}
cacheFetchedItemsView = &view.View{
Measure: cacheFetchedItems,
Name: "open-match.dev/query/total_fetched_items",
Description: "Total number of fetched tickets",
Aggregation: view.Sum(),
}
cacheUpdateView = &view.View{
Measure: cacheWaitingQueries,
Name: "open-match.dev/query/cache_updates",
Description: "Number of query cache updates in total",
Aggregation: view.Count(),
}
cacheWaitingQueriesView = &view.View{
Measure: cacheWaitingQueries,
Name: "open-match.dev/query/waiting_requests",
Description: "Number of waiting requests in total",
Aggregation: telemetry.DefaultCountDistribution,
}
cacheUpdateLatencyView = &view.View{
Measure: cacheUpdateLatency,
Name: "open-match.dev/query/update_latency",
Description: "Time elapsed of each query cache update",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
)
// BindService creates the query service and binds it to the serving harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
func BindService(p *rpc.ServerParams, cfg config.View) error {
service := &queryService{
cfg: p.Config(),
tc: newTicketCache(b, p.Config()),
cfg: cfg,
store: statestore.New(cfg),
}
b.AddHandleFunc(func(s *grpc.Server) {
p.AddHealthCheckFunc(service.store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterQueryServiceServer(s, service)
}, pb.RegisterQueryServiceHandlerFromEndpoint)
b.RegisterViews(
ticketsPerQueryView,
cacheTotalItemsView,
cacheUpdateView,
cacheFetchedItemsView,
cacheWaitingQueriesView,
cacheUpdateLatencyView,
)
return nil
}

View File

@ -16,20 +16,14 @@ package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/filter"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
"open-match.dev/open-match/internal/statestore"
)
var (
@ -42,101 +36,49 @@ var (
// queryService API provides utility functions for common MMF functionality such
// as retrieving Tickets from state storage.
type queryService struct {
cfg config.View
tc *ticketCache
cfg config.View
store statestore.Service
}
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `storage.pool.size` and stream back response.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and a maximum of 10000
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
ctx := responseServer.Context()
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
}
pf, err := filter.NewPoolFilter(pool)
if err != nil {
return err
}
var results []*pb.Ticket
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
for _, ticket := range tickets {
if pf.In(ticket) {
results = append(results, ticket)
}
}
})
if err != nil {
err = errors.Wrap(err, "QueryTickets: failed to run request")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
ctx := responseServer.Context()
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
end := start + pSize
if end > len(results) {
end = len(results)
}
err := responseServer.Send(&pb.QueryTicketsResponse{
Tickets: results[start:end],
})
callback := func(tickets []*pb.Ticket) error {
err := responseServer.Send(&pb.QueryTicketsResponse{Tickets: tickets})
if err != nil {
return err
logger.WithError(err).Error("Failed to send Redis response to grpc server")
return status.Errorf(codes.Aborted, err.Error())
}
return nil
}
return nil
return doQueryTickets(ctx, pool, pSize, callback, s.store)
}
func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseServer pb.QueryService_QueryTicketIdsServer) error {
ctx := responseServer.Context()
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
}
pf, err := filter.NewPoolFilter(pool)
func doQueryTickets(ctx context.Context, pool *pb.Pool, pageSize int, sender func(tickets []*pb.Ticket) error, store statestore.Service) error {
// Send requests to the storage service
err := store.FilterTickets(ctx, pool, pageSize, sender)
if err != nil {
logger.WithError(err).Error("Failed to retrieve result from storage service.")
return err
}
var results []string
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
for id, ticket := range tickets {
if pf.In(ticket) {
results = append(results, id)
}
}
})
if err != nil {
err = errors.Wrap(err, "QueryTicketIds: failed to run request")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
end := start + pSize
if end > len(results) {
end = len(results)
}
err := responseServer.Send(&pb.QueryTicketIdsResponse{
Ids: results[start:end],
})
if err != nil {
return err
}
}
return nil
}
func getPageSize(cfg config.View) int {
const (
name = "queryPageSize"
name = "storage.page.size"
// Minimum number of tickets to be returned in a streamed response for QueryTickets. This value
// will be used if page size is configured lower than the minimum value.
minPageSize int = 10
@ -152,7 +94,7 @@ func getPageSize(cfg config.View) int {
return defaultPageSize
}
pSize := cfg.GetInt(name)
pSize := cfg.GetInt("storage.page.size")
if pSize < minPageSize {
logger.Infof("page size %v is lower than the minimum limit of %v", pSize, maxPageSize)
pSize = minPageSize
@ -165,159 +107,3 @@ func getPageSize(cfg config.View) int {
return pSize
}
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
// ticketCache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type ticketCache struct {
	// store is the backing state storage the cache is refreshed from.
	store statestore.Service

	// requests carries waiting callers to the active runRequest goroutine.
	requests chan *cacheRequest

	// Single item buffered channel. Holds a value when runRequest can be safely
	// started. Basically a channel/select friendly mutex around runRequest
	// running.
	startRunRequest chan struct{}

	// wg counts callers currently reading the cache; runRequest waits on it
	// before returning its startRunRequest token.
	wg sync.WaitGroup

	// Multithreaded unsafe fields, only to be written by update, and read when
	// request given the ok.
	tickets map[string]*pb.Ticket
	err     error
}
// newTicketCache constructs a ticketCache backed by the configured state
// store and registers the store's health check on the serving harness.
func newTicketCache(b *appmain.Bindings, cfg config.View) *ticketCache {
	tc := &ticketCache{
		store:           statestore.New(cfg),
		requests:        make(chan *cacheRequest),
		startRunRequest: make(chan struct{}, 1),
		tickets:         make(map[string]*pb.Ticket),
	}
	// Seed the single token so the first caller of request can start the
	// initial runRequest goroutine.
	tc.startRunRequest <- struct{}{}
	b.AddHealthCheckFunc(tc.store.HealthCheck)
	return tc
}
// cacheRequest represents one caller waiting for access to the ticket cache.
type cacheRequest struct {
	// ctx is the caller's context; used to abandon the request on cancellation.
	ctx context.Context
	// runNow receives a value when the cache has been updated and the caller
	// may safely read it.
	runNow chan struct{}
}
// request gives f safe, read-only access to the cached ticket map. All
// callers waiting concurrently are batched into a single cache update by
// runRequest, after which each runs its query against the refreshed map.
// Returns an error if ctx is canceled while waiting, or if the cache
// update itself failed (tc.err).
func (tc *ticketCache) request(ctx context.Context, f func(map[string]*pb.Ticket)) error {
	cr := &cacheRequest{
		ctx:    ctx,
		runNow: make(chan struct{}),
	}

sendRequest:
	for {
		select {
		case <-ctx.Done():
			// Fixed typo in error message: "reuest" -> "request".
			return errors.Wrap(ctx.Err(), "ticket cache request canceled before request sent.")
		case <-tc.startRunRequest:
			// No runRequest is active: start one to serve this (and any
			// other waiting) request, then keep trying to enqueue.
			go tc.runRequest()
		case tc.requests <- cr:
			break sendRequest
		}
	}

	select {
	case <-ctx.Done():
		return errors.Wrap(ctx.Err(), "ticket cache request canceled waiting for access.")
	case <-cr.runNow:
		// Access granted; signal completion when done so runRequest can finish.
		defer tc.wg.Done()
	}

	if tc.err != nil {
		return tc.err
	}

	f(tc.tickets)
	return nil
}
// runRequest serves one batch of cache reads: it waits for the first
// request, drains all other currently waiting requests, performs a single
// cache update, then grants each request access and waits for all of them
// to finish. Exactly one runRequest runs at a time, gated by the
// startRunRequest token returned in the deferred send below.
func (tc *ticketCache) runRequest() {
	defer func() {
		// Return the token so the next runRequest can be started.
		tc.startRunRequest <- struct{}{}
	}()

	// Wait for first query request.
	reqs := []*cacheRequest{<-tc.requests}

	// Collect all waiting queries.
collectAllWaiting:
	for {
		select {
		case req := <-tc.requests:
			reqs = append(reqs, req)
		default:
			break collectAllWaiting
		}
	}

	tc.update()
	stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))

	// Send WaitGroup to query calls, letting them run their query on the ticket
	// cache.
	for _, req := range reqs {
		tc.wg.Add(1)
		select {
		case req.runNow <- struct{}{}:
		case <-req.ctx.Done():
			// Caller gave up while waiting; undo its Add so Wait can return.
			tc.wg.Done()
		}
	}

	// wait for requests to finish using ticket cache.
	tc.wg.Wait()
}
// update refreshes the ticket cache from state storage. It diffs the cached
// IDs against the currently indexed ID set: cached tickets no longer indexed
// are evicted, and newly indexed tickets are fetched and added. On a storage
// error tc.err is set (leaving tc.tickets as-is) so waiting requests see the
// failure; tc.err is cleared again after a successful update. Only called
// from runRequest, so no locking is needed here.
func (tc *ticketCache) update() {
	st := time.Now()
	previousCount := len(tc.tickets)

	currentAll, err := tc.store.GetIndexedIDSet(context.Background())
	if err != nil {
		tc.err = err
		return
	}

	// Evict cached tickets that are no longer indexed.
	deletedCount := 0
	for id := range tc.tickets {
		if _, ok := currentAll[id]; !ok {
			delete(tc.tickets, id)
			deletedCount++
		}
	}

	// Fetch only the tickets that are indexed but not yet cached.
	toFetch := []string{}
	for id := range currentAll {
		if _, ok := tc.tickets[id]; !ok {
			toFetch = append(toFetch, id)
		}
	}

	newTickets, err := tc.store.GetTickets(context.Background(), toFetch)
	if err != nil {
		tc.err = err
		return
	}

	for _, t := range newTickets {
		tc.tickets[t.Id] = t
	}

	stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
	stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
	stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(st))/float64(time.Millisecond)))

	logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
	tc.err = nil
}

View File

@ -15,13 +15,139 @@
package query
import (
"context"
"errors"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/statestore"
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
internalTesting "open-match.dev/open-match/internal/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
)
// TestDoQueryTickets runs doQueryTickets against an in-memory test statestore
// and verifies that the tickets streamed to the sender match the pool's
// filters, and that sender failures surface as the expected error.
// Note: actualTickets is shared closure state overwritten by each successful
// sender call, so the cases depend on running sequentially (no t.Parallel).
func TestDoQueryTickets(t *testing.T) {
	const (
		// Names of the two numeric search fields carried by every generated ticket.
		DoubleArg1 = "level"
		DoubleArg2 = "spd"
	)

	// Holds the most recent page of tickets handed to a successful sender.
	var actualTickets []*pb.Ticket
	fakeErr := errors.New("some error")

	// senderGenerator builds the page-sender callback passed to doQueryTickets:
	// with a non-nil err it always fails; otherwise it records the received
	// tickets for the assertions below.
	senderGenerator := func(err error) func(tickets []*pb.Ticket) error {
		return func(tickets []*pb.Ticket) error {
			if err != nil {
				return err
			}
			actualTickets = tickets
			return err
		}
	}

	// A grid of tickets covering every (DoubleArg1, DoubleArg2) combination
	// over [0, 20] in steps of 5.
	testTickets := internalTesting.GenerateFloatRangeTickets(
		internalTesting.Property{Name: DoubleArg1, Min: 0, Max: 20, Interval: 5},
		internalTesting.Property{Name: DoubleArg2, Min: 0, Max: 20, Interval: 5},
	)

	tests := []struct {
		description string
		sender      func(tickets []*pb.Ticket) error
		pool        *pb.Pool
		pageSize    int
		// action seeds the store before the query runs.
		action      func(context.Context, *testing.T, statestore.Service)
		wantErr     error
		wantTickets []*pb.Ticket
	}{
		{
			"expect empty response from an empty store",
			senderGenerator(nil),
			&pb.Pool{
				DoubleRangeFilters: []*pb.DoubleRangeFilter{
					{
						DoubleArg: DoubleArg1,
						Min:       0,
						Max:       10,
					},
				},
			},
			100,
			func(_ context.Context, _ *testing.T, _ statestore.Service) {},
			nil,
			nil,
		},
		{
			"expect tickets with DoubleArg1 value in range of [0, 10] (inclusively)",
			senderGenerator(nil),
			&pb.Pool{
				DoubleRangeFilters: []*pb.DoubleRangeFilter{
					{
						DoubleArg: DoubleArg1,
						Min:       0,
						Max:       10,
					},
				},
			},
			100,
			func(ctx context.Context, t *testing.T, store statestore.Service) {
				for _, testTicket := range testTickets {
					assert.Nil(t, store.CreateTicket(ctx, testTicket))
					assert.Nil(t, store.IndexTicket(ctx, testTicket))
				}
			},
			nil,
			// Max is 10.1 so the generated expectation includes tickets whose
			// DoubleArg1 equals 10 (values generated: 0, 5, 10).
			internalTesting.GenerateFloatRangeTickets(
				internalTesting.Property{Name: DoubleArg1, Min: 0, Max: 10.1, Interval: 5},
				internalTesting.Property{Name: DoubleArg2, Min: 0, Max: 20, Interval: 5},
			),
		},
		{
			"expect error from canceled context",
			senderGenerator(fakeErr),
			&pb.Pool{
				DoubleRangeFilters: []*pb.DoubleRangeFilter{
					{
						DoubleArg: DoubleArg1,
						Min:       0,
						Max:       10,
					},
				},
			},
			100,
			func(ctx context.Context, t *testing.T, store statestore.Service) {
				for _, testTicket := range testTickets {
					assert.Nil(t, store.CreateTicket(ctx, testTicket))
					assert.Nil(t, store.IndexTicket(ctx, testTicket))
				}
			},
			// The sender's error is wrapped as an Internal gRPC status.
			status.Errorf(codes.Internal, "%v", fakeErr),
			nil,
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			cfg := viper.New()
			cfg.Set("storage.page.size", 1000)
			store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
			defer closer()

			ctx := utilTesting.NewContext(t)
			test.action(ctx, t, store)
			// The query's terminal error must match exactly (nil on success).
			assert.Equal(t, test.wantErr, doQueryTickets(ctx, test.pool, test.pageSize, test.sender, store))
			for _, wantTicket := range test.wantTickets {
				assert.Contains(t, actualTickets, wantTicket)
			}
		})
	}
}
func TestGetPageSize(t *testing.T) {
testCases := []struct {
name string
@ -36,33 +162,34 @@ func TestGetPageSize(t *testing.T) {
{
"set",
func(cfg config.Mutable) {
cfg.Set("queryPageSize", "2156")
cfg.Set("storage.page.size", "2156")
},
2156,
},
{
"low",
func(cfg config.Mutable) {
cfg.Set("queryPageSize", "9")
cfg.Set("storage.page.size", "9")
},
10,
},
{
"high",
func(cfg config.Mutable) {
cfg.Set("queryPageSize", "10001")
cfg.Set("storage.page.size", "10001")
},
10000,
},
}
for _, tt := range testCases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
cfg := viper.New()
tt.configure(cfg)
actual := getPageSize(cfg)
require.Equal(t, tt.expected, actual)
if actual != tt.expected {
t.Errorf("got %d, want %d", actual, tt.expected)
}
})
}
}

View File

@ -53,6 +53,8 @@ func RunApplication() {
func serve(cfg config.View) {
mux := &http.ServeMux{}
closer := telemetry.Setup("swaggerui", mux, cfg)
defer closer()
port := cfg.GetInt("api.swaggerui.httpport")
baseDir, err := os.Getwd()
if err != nil {

View File

@ -24,10 +24,11 @@ import (
"github.com/golang/protobuf/jsonpb"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/omerror"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/pkg/pb"
)
@ -40,10 +41,10 @@ var (
)
type evaluator interface {
evaluate(context.Context, <-chan []*pb.Match, chan<- string) error
evaluate(context.Context, <-chan []*pb.Match) ([]string, error)
}
var errNoEvaluatorType = status.Errorf(codes.FailedPrecondition, "unable to determine evaluator type, either api.evaluator.grpcport or api.evaluator.httpport must be specified in the config")
var errNoEvaluatorType = grpc.Errorf(codes.FailedPrecondition, "unable to determine evaluator type, either api.evaluator.grpcport or api.evaluator.httpport must be specified in the config")
func newEvaluator(cfg config.View) evaluator {
newInstance := func(cfg config.View) (interface{}, func(), error) {
@ -66,17 +67,17 @@ type deferredEvaluator struct {
cacher *config.Cacher
}
func (de *deferredEvaluator) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
func (de *deferredEvaluator) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
e, err := de.cacher.Get()
if err != nil {
return err
return nil, err
}
err = e.(evaluator).evaluate(ctx, pc, acceptedIds)
matches, err := e.(evaluator).evaluate(ctx, pc)
if err != nil {
de.cacher.ForceReset()
}
return err
return matches, err
}
type grcpEvaluatorClient struct {
@ -87,7 +88,7 @@ func newGrpcEvaluator(cfg config.View) (evaluator, func(), error) {
grpcAddr := fmt.Sprintf("%s:%d", cfg.GetString("api.evaluator.hostname"), cfg.GetInt64("api.evaluator.grpcport"))
conn, err := rpc.GRPCClientFromEndpoint(cfg, grpcAddr)
if err != nil {
return nil, nil, fmt.Errorf("failed to create grpc evaluator client: %w", err)
return nil, nil, fmt.Errorf("Failed to create grpc evaluator client: %w", err)
}
evaluatorClientLogger.WithFields(logrus.Fields{
@ -106,26 +107,21 @@ func newGrpcEvaluator(cfg config.View) (evaluator, func(), error) {
}, close, nil
}
func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
eg, ctx := errgroup.WithContext(ctx)
func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
var stream pb.Evaluator_EvaluateClient
{ // prevent shadowing err later
var err error
stream, err = ec.evaluator.Evaluate(ctx)
if err != nil {
return fmt.Errorf("error starting evaluator call: %w", err)
return nil, fmt.Errorf("Error starting evaluator call: %w", err)
}
}
matchIDs := &sync.Map{}
eg.Go(func() error {
results := []string{}
wait := omerror.WaitOnErrors(evaluatorClientLogger, func() error {
for proposals := range pc {
for _, proposal := range proposals {
if _, ok := matchIDs.LoadOrStore(proposal.GetMatchId(), true); ok {
return fmt.Errorf("multiple match functions used same match_id: \"%s\"", proposal.GetMatchId())
}
if err := stream.Send(&pb.EvaluateRequest{Match: proposal}); err != nil {
return fmt.Errorf("failed to send request to evaluator, desc: %w", err)
}
@ -136,9 +132,7 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
return fmt.Errorf("failed to close the send direction of evaluator stream, desc: %w", err)
}
return nil
})
eg.Go(func() error {
}, func() error {
for {
// TODO: add grpc timeouts for this call.
resp, err := stream.Recv()
@ -148,24 +142,15 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
if err != nil {
return fmt.Errorf("failed to get response from evaluator client, desc: %w", err)
}
v, ok := matchIDs.Load(resp.GetMatchId())
if !ok {
return fmt.Errorf("evaluator returned match_id \"%s\" which does not correspond to its any match in its input", resp.GetMatchId())
}
if !v.(bool) {
return fmt.Errorf("evaluator returned same match_id twice: \"%s\"", resp.GetMatchId())
}
matchIDs.Store(resp.GetMatchId(), false)
acceptedIds <- resp.GetMatchId()
results = append(results, resp.GetMatchId())
}
})
err := eg.Wait()
err := wait()
if err != nil {
return err
return nil, err
}
return nil
return results, nil
}
type httpEvaluatorClient struct {
@ -194,7 +179,7 @@ func newHTTPEvaluator(cfg config.View) (evaluator, func(), error) {
}, close, nil
}
func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
reqr, reqw := io.Pipe()
var wg sync.WaitGroup
wg.Add(1)
@ -227,14 +212,14 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
req, err := http.NewRequest("POST", ec.baseURL+"/v1/evaluator/matches:evaluate", reqr)
if err != nil {
return status.Errorf(codes.Aborted, "failed to create evaluator http request, desc: %s", err.Error())
return nil, status.Errorf(codes.Aborted, "failed to create evaluator http request, desc: %s", err.Error())
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Transfer-Encoding", "chunked")
resp, err := ec.httpClient.Do(req.WithContext(ctx))
if err != nil {
return status.Errorf(codes.Aborted, "failed to get response from evaluator, desc: %s", err.Error())
return nil, status.Errorf(codes.Aborted, "failed to get response from evaluator, desc: %s", err.Error())
}
defer func() {
if resp.Body.Close() != nil {
@ -243,6 +228,7 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
}()
wg.Add(1)
var results = []string{}
rc := make(chan error, 1)
defer close(rc)
go func() {
@ -271,16 +257,16 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
rc <- status.Errorf(codes.Unavailable, "failed to execute jsonpb.UnmarshalString(%s, &proposal): %v.", item.Result, err)
return
}
acceptedIds <- resp.GetMatchId()
results = append(results, resp.GetMatchId())
}
}()
wg.Wait()
if len(sc) != 0 {
return <-sc
return nil, <-sc
}
if len(rc) != 0 {
return <-rc
return nil, <-rc
}
return nil
return results, nil
}

View File

@ -15,52 +15,21 @@
package synchronizer
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
)
var (
iterationLatency = stats.Float64("open-match.dev/synchronizer/iteration_latency", "Time elapsed of each synchronizer iteration", stats.UnitMilliseconds)
registrationWaitTime = stats.Float64("open-match.dev/synchronizer/registration_wait_time", "Time elapsed of registration wait time", stats.UnitMilliseconds)
registrationMMFDoneTime = stats.Float64("open-match.dev/synchronizer/registration_mmf_done_time", "Time elapsed wasted in registration window with done MMFs", stats.UnitMilliseconds)
iterationLatencyView = &view.View{
Measure: iterationLatency,
Name: "open-match.dev/synchronizer/iteration_latency",
Description: "Time elapsed of each synchronizer iteration",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
registrationWaitTimeView = &view.View{
Measure: registrationWaitTime,
Name: "open-match.dev/synchronizer/registration_wait_time",
Description: "Time elapsed of registration wait time",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
registrationMMFDoneTimeView = &view.View{
Measure: registrationMMFDoneTime,
Name: "open-match.dev/synchronizer/registration_mmf_done_time",
Description: "Time elapsed wasted in registration window with done MMFs",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
)
// BindService creates the synchronizer service and binds it to the serving harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
store := statestore.New(p.Config())
service := newSynchronizerService(p.Config(), newEvaluator(p.Config()), store)
b.AddHealthCheckFunc(store.HealthCheck)
b.AddHandleFunc(func(s *grpc.Server) {
func BindService(p *rpc.ServerParams, cfg config.View) error {
store := statestore.New(cfg)
service := newSynchronizerService(cfg, newEvaluator(cfg), store)
p.AddHealthCheckFunc(store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
ipb.RegisterSynchronizerServer(s, service)
}, nil)
b.RegisterViews(
iterationLatencyView,
registrationWaitTimeView,
registrationMMFDoneTimeView,
)
return nil
}

View File

@ -21,10 +21,7 @@ import (
"sync"
"time"
"go.opencensus.io/stats"
"github.com/sirupsen/logrus"
"open-match.dev/open-match/internal/appmain/contextcause"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/statestore"
@ -43,7 +40,7 @@ var (
// Streams from multiple GRPC calls of matches are combined on a single channel.
// These matches are sent to the evaluator, then the tickets are added to the
// pending release list. Finally the matches are returned to the calling stream.
// ignore list. Finally the matches are returned to the calling stream.
// receive from backend | Synchronize
// -> m1c ->
@ -55,7 +52,7 @@ var (
// -> m4c -> (buffered)
// send to evaluator | wrapEvaluator
// -> m5c -> (buffered)
// add tickets to pending release | addMatchesToPendingRelease
// add tickets to ignore list | addMatchesToIgnoreList
// -> m6c ->
// fan out to origin synchronize call | fanInFanOut
// -> (Synchronize call specific ) m7c -> (buffered)
@ -126,11 +123,7 @@ func (s *synchronizerService) Synchronize(stream ipb.Synchronizer_SynchronizeSer
select {
case mIDs, ok := <-m6cBuffer:
if !ok {
// Prevent race: An error will result in this channel being
// closed as part of cleanup. If it's especially fast, it may
// beat the context done case, so be sure to return any
// potential error.
return registration.cycleCtx.Err()
return nil
}
for _, mID := range mIDs {
err = stream.Send(&ipb.SynchronizeResponse{MatchId: mID})
@ -188,9 +181,6 @@ func (s synchronizerService) register(ctx context.Context) *registration {
resp: make(chan *registration),
ctx: ctx,
}
st := time.Now()
defer stats.Record(ctx, registrationWaitTime.M(float64(time.Since(st))/float64(time.Millisecond)))
for {
select {
case s.synchronizeRegistration <- req:
@ -208,9 +198,8 @@ func (s synchronizerService) register(ctx context.Context) *registration {
///////////////////////////////////////
func (s *synchronizerService) runCycle() {
cst := time.Now()
/////////////////////////////////////// Initialize cycle
ctx, cancel := contextcause.WithCancelCause(context.Background())
ctx, cancel := withCancelCause(context.Background())
m2c := make(chan mAndM6c)
m3c := make(chan *pb.Match)
@ -240,14 +229,13 @@ func (s *synchronizerService) runCycle() {
go s.cacheMatchIDToTicketIDs(matchTickets, m3c, m4c)
go s.wrapEvaluator(ctx, cancel, bufferMatchChannel(m4c), m5c)
go func() {
s.addMatchesToPendingRelease(ctx, matchTickets, cancel, bufferStringChannel(m5c), m6c)
// Wait for pending release, but not all matches returned, the next cycle
s.addMatchesToIgnoreList(ctx, matchTickets, cancel, bufferStringChannel(m5c), m6c)
// Wait for ignore list, but not all matches returned, the next cycle
// can start now.
close(closedOnCycleEnd)
}()
/////////////////////////////////////// Run Registration Period
rst := time.Now()
closeRegistration := time.After(s.registrationInterval())
Registration:
for {
@ -280,7 +268,6 @@ Registration:
go func() {
allM1cSent.Wait()
m1c.cutoff()
stats.Record(ctx, registrationMMFDoneTime.M(float64((s.registrationInterval()-time.Since(rst))/time.Millisecond)))
}()
cancelProposalCollection := time.AfterFunc(s.proposalCollectionInterval(), func() {
@ -290,7 +277,6 @@ Registration:
}
})
<-closedOnCycleEnd
stats.Record(ctx, iterationLatency.M(float64(time.Since(cst)/time.Millisecond)))
// Clean up in case it was never needed.
cancelProposalCollection.Stop()
@ -401,9 +387,13 @@ func (c *cutoffSender) cutoff() {
///////////////////////////////////////
// Calls the evaluator with the matches.
func (s *synchronizerService) wrapEvaluator(ctx context.Context, cancel contextcause.CancelErrFunc, m4c <-chan []*pb.Match, m5c chan<- string) {
err := s.eval.evaluate(ctx, m4c, m5c)
if err != nil {
func (s *synchronizerService) wrapEvaluator(ctx context.Context, cancel cancelErrFunc, m3c <-chan []*pb.Match, m5c chan<- string) {
matchIDs, err := s.eval.evaluate(ctx, m3c)
if err == nil {
for _, mID := range matchIDs {
m5c <- mID
}
} else {
logger.WithFields(logrus.Fields{
"error": err,
}).Error("error calling evaluator, canceling cycle")
@ -435,10 +425,10 @@ func getTicketIds(tickets []*pb.Ticket) []string {
///////////////////////////////////////
// Calls statestore to add all of the tickets returned by the evaluator to the
// pendingRelease list. If it partially fails for whatever reason (not all tickets will
// ignorelist. If it partially fails for whatever reason (not all tickets will
// necessarily be in the same call), only the matches which can be safely
// returned to the Synchronize calls are.
func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *sync.Map, cancel cancelErrFunc, m5c <-chan []string, m6c chan<- string) {
totalMatches := 0
successfulMatches := 0
var lastErr error
@ -453,7 +443,7 @@ func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m
}
}
err := s.store.AddTicketsToPendingRelease(ctx, ids)
err := s.store.AddTicketsToIgnoreList(ctx, ids)
totalMatches += len(mIDs)
if err == nil {
@ -472,10 +462,10 @@ func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m
"error": lastErr.Error(),
"totalMatches": totalMatches,
"successfulMatches": successfulMatches,
}).Error("some or all matches were not successfully added to the pending release, failed matches dropped")
}).Error("some or all matches were not successfully added to the ignore list, failed matches dropped")
if successfulMatches == 0 {
cancel(fmt.Errorf("no matches successfully added to the pending release. Last error: %w", lastErr))
cancel(fmt.Errorf("no matches successfully added to the ignore list. Last error: %w", lastErr))
}
}
close(m6c)
@ -486,7 +476,7 @@ func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m
func (s *synchronizerService) registrationInterval() time.Duration {
const (
name = "registrationInterval"
name = "synchronizer.registrationIntervalMs"
defaultInterval = time.Second
)
@ -499,7 +489,7 @@ func (s *synchronizerService) registrationInterval() time.Duration {
func (s *synchronizerService) proposalCollectionInterval() time.Duration {
const (
name = "proposalCollectionInterval"
name = "synchronizer.proposalCollectionIntervalMs"
defaultInterval = 10 * time.Second
)
@ -588,3 +578,46 @@ func bufferStringChannel(in chan string) chan []string {
}()
return out
}
///////////////////////////////////////
///////////////////////////////////////
// withCancelCause returns a copy of parent with a new Done channel. The
// returned context's Done channel is closed when the returned cancel function
// is called or when the parent context's Done channel is closed, whichever
// happens first. Unlike the conext package's WithCancel, the cancel func takes
// an error, and will return that error on subsequent calls to Err().
func withCancelCause(parent context.Context) (context.Context, cancelErrFunc) {
parent, cancel := context.WithCancel(parent)
ctx := &contextWithCancelCause{
Context: parent,
}
return ctx, func(err error) {
ctx.m.Lock()
defer ctx.m.Unlock()
if ctx.err == nil && parent.Err() == nil {
ctx.err = err
}
cancel()
}
}
type cancelErrFunc func(err error)
type contextWithCancelCause struct {
context.Context
m sync.Mutex
err error
}
func (ctx *contextWithCancelCause) Err() error {
ctx.m.Lock()
defer ctx.m.Unlock()
if ctx.err == nil {
return ctx.Context.Err()
}
return ctx.err
}

View File

@ -1,220 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package appmain contains the common application initialization code for Open Match servers.
package appmain
import (
"context"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"go.opencensus.io/stats/view"
"github.com/sirupsen/logrus"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/logging"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/telemetry"
)
var (
logger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "app.main",
})
)
// RunApplication starts and runs the given application forever. For use in
// main functions to run the full application. It blocks until the process
// receives SIGTERM or SIGINT, then stops the application and exits.
func RunApplication(serviceName string, bindService Bind) {
	signals := make(chan os.Signal, 1)
	// SIGTERM is signaled by k8s when it wants a pod to stop; SIGINT covers
	// interactive Ctrl-C.
	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)

	readConfig := func() (config.View, error) {
		return config.Read()
	}
	app, err := NewApplication(serviceName, bindService, readConfig, net.Listen)
	if err != nil {
		logger.Fatal(err)
	}

	// Block until asked to terminate, then shut down cleanly.
	<-signals
	if err := app.Stop(); err != nil {
		logger.Fatal(err)
	}
	logger.Info("Application stopped successfully.")
}
// Bind is a function which starts an application, and binds it to serving.
type Bind func(p *Params, b *Bindings) error

// Params are inputs to starting an application.
type Params struct {
	// config is the application configuration, exposed via Config().
	config config.View
	// serviceName is the binary's name, exposed via ServiceName().
	serviceName string
}

// Config provides the configuration for the application.
func (p *Params) Config() config.View {
	return p.config
}

// ServiceName is a name for the currently running binary specified by
// RunApplication.
func (p *Params) ServiceName() string {
	return p.serviceName
}
// Bindings allows applications to bind various functions to the running servers.
type Bindings struct {
	// sp holds the gRPC/HTTP server parameters handlers are registered on.
	sp *rpc.ServerParams
	// a is the owning App, whose closer list Bindings appends to.
	a *App
	// firstErr records the first error raised during binding (e.g. from
	// RegisterViews); it is surfaced after bindService completes.
	firstErr error
}

// AddHealthCheckFunc allows an application to check if it is healthy, and
// contribute to the overall server health.
func (b *Bindings) AddHealthCheckFunc(f func(context.Context) error) {
	b.sp.AddHealthCheckFunc(f)
}

// RegisterViews begins collecting data for the given views. On failure the
// error is stashed in firstErr (only the first is kept); on success the views
// are unregistered again when the application stops.
func (b *Bindings) RegisterViews(v ...*view.View) {
	if err := view.Register(v...); err != nil {
		if b.firstErr == nil {
			b.firstErr = err
		}
		return
	}

	b.AddCloser(func() {
		view.Unregister(v...)
	})
}

// AddHandleFunc adds a protobuf service to the grpc server which is starting.
func (b *Bindings) AddHandleFunc(handlerFunc rpc.GrpcHandler, grpcProxyHandler rpc.GrpcProxyHandler) {
	b.sp.AddHandleFunc(handlerFunc, grpcProxyHandler)
}

// TelemetryHandle adds a handler to the mux for serving debug info and metrics.
func (b *Bindings) TelemetryHandle(pattern string, handler http.Handler) {
	b.sp.ServeMux.Handle(pattern, handler)
}

// TelemetryHandleFunc adds a handlerfunc to the mux for serving debug info and metrics.
func (b *Bindings) TelemetryHandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
	b.sp.ServeMux.HandleFunc(pattern, handler)
}

// AddCloser specifies a function to be called when the application is being
// stopped. Closers are called in reverse order.
func (b *Bindings) AddCloser(c func()) {
	// Wrap the error-less closer so it fits the App's []func() error list.
	b.a.closers = append(b.a.closers, func() error {
		c()
		return nil
	})
}

// AddCloserErr specifies a function to be called when the application is being
// stopped. Closers are called in reverse order. The first error returned by
// a closer will be logged.
func (b *Bindings) AddCloserErr(c func() error) {
	b.a.closers = append(b.a.closers, c)
}
// App is used internally, and public only for apptest. Do not use, and use apptest instead.
type App struct {
	// closers run in reverse registration order when the App stops.
	closers []func() error
}
// NewApplication is used internally, and public only for apptest. Do not use, and use apptest instead.
//
// It reads configuration via getCfg, configures logging, constructs the
// server parameters, runs telemetry setup and the service's bind function,
// and finally starts the server. Any failure after the App exists stops the
// App (best effort) before the error is returned.
//
// NOTE(review): configuration read and server construction failures call
// logger.Fatalf (terminating the process) instead of returning an error —
// confirm this asymmetry is intentional before relying on the error return
// for those paths.
func NewApplication(serviceName string, bindService Bind, getCfg func() (config.View, error), listen func(network, address string) (net.Listener, error)) (*App, error) {
	a := &App{}

	// failWith stops the partially-constructed application and propagates
	// err. Errors from Stop are deliberately ignored here: the original
	// error is the one the caller needs.
	failWith := func(err error) (*App, error) {
		_ = a.Stop()
		return nil, err
	}

	cfg, err := getCfg()
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatalf("cannot read configuration.")
	}
	logging.ConfigureLogging(cfg)
	sp, err := rpc.NewServerParamsFromConfig(cfg, "api."+serviceName, listen)
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatalf("cannot construct server.")
	}

	p := &Params{
		config:      cfg,
		serviceName: serviceName,
	}
	b := &Bindings{
		a:  a,
		sp: sp,
	}

	if err := telemetry.Setup(p, b); err != nil {
		return failWith(err)
	}
	if err := bindService(p, b); err != nil {
		return failWith(err)
	}
	// Bindings records the first error raised during binding (e.g. from
	// RegisterViews) rather than failing immediately; surface it now.
	if b.firstErr != nil {
		return failWith(b.firstErr)
	}

	s := &rpc.Server{}
	if err := s.Start(sp); err != nil {
		return failWith(err)
	}
	b.AddCloserErr(s.Stop)

	return a, nil
}
// Stop is used internally, and public only for apptest. Do not use, and use apptest instead.
//
// It invokes every registered closer in reverse registration order — since
// dependencies are created before their dependants, this shuts dependants
// down first — and reports the first error encountered, if any.
func (a *App) Stop() error {
	var firstErr error
	for idx := len(a.closers); idx > 0; idx-- {
		if err := a.closers[idx-1](); firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

View File

@ -1,156 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package apptest allows testing of binded services within memory.
package apptest
import (
"net"
"testing"
"github.com/pkg/errors"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
)
// ServiceName is a constant used for all in memory tests. It is the name
// passed to appmain.NewApplication by TestApp and RunInCluster.
const ServiceName = "test"
// TestApp starts an application for testing. It will automatically stop after
// the test completes, and immediately fail the test if there is an error
// starting. The caller must provide the listers to use for the app, this way
// the listeners can use a random port, and set the proper values on the config.
func TestApp(t *testing.T, cfg config.View, listeners []net.Listener, binds ...appmain.Bind) {
	// Index the caller-provided listeners by address so the app's listen
	// callback hands each one out exactly once.
	ls, err := newListenerStorage(listeners)
	if err != nil {
		t.Fatal(err)
	}
	getCfg := func() (config.View, error) {
		return cfg, nil
	}

	app, err := appmain.NewApplication(ServiceName, bindAll(binds), getCfg, ls.listen)
	if err != nil {
		t.Fatal(err)
	}

	// Stop the application when the test (and its subtests) finish.
	t.Cleanup(func() {
		err := app.Stop()
		if err != nil {
			t.Fatal(err)
		}
	})
}
// RunInCluster allows for running services during an in cluster e2e test.
// This is NOT for running the actual code under test, but instead allow
// running auxiliary services the code under test might call. The returned
// function stops the started application.
func RunInCluster(binds ...appmain.Bind) (func() error, error) {
	cfgSource := func() (config.View, error) {
		return config.Read()
	}

	app, err := appmain.NewApplication(ServiceName, bindAll(binds), cfgSource, net.Listen)
	if err != nil {
		return nil, err
	}
	return app.Stop, nil
}
// bindAll combines several Bind functions into a single one that applies
// them in order, stopping at the first error.
func bindAll(binds []appmain.Bind) appmain.Bind {
	return func(p *appmain.Params, b *appmain.Bindings) error {
		for _, bindOne := range binds {
			if err := bindOne(p, b); err != nil {
				return err
			}
		}
		return nil
	}
}
// newFullAddr parses address into host and port components and pairs them
// with the given network, producing a canonical fullAddr.
func newFullAddr(network, address string) (fullAddr, error) {
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return fullAddr{}, err
	}
	// Listeners are usually started with an "unspecified" ip address, which
	// has several equivalent forms: ":80", "0.0.0.0:80", "[::]:80". Even if
	// the callers use the same form, the listeners may return a different
	// form when asked for its address. So normalize every unspecified host
	// to the simplest form: the empty string.
	if net.ParseIP(host).IsUnspecified() {
		host = ""
	}
	return fullAddr{
		network: network,
		host:    host,
		port:    port,
	}, nil
}

// fullAddr is a normalized (network, host, port) key used to look up
// pre-created listeners.
type fullAddr struct {
	network string
	host    string
	port    string
}
// listenerStorage indexes pre-created listeners by their normalized address
// so each can be handed out exactly once via listen.
type listenerStorage struct {
	l map[fullAddr]net.Listener
}

// newListenerStorage builds a listenerStorage from the given listeners,
// keyed by each listener's reported network and address.
func newListenerStorage(listeners []net.Listener) (*listenerStorage, error) {
	store := &listenerStorage{
		l: make(map[fullAddr]net.Listener),
	}
	for _, lis := range listeners {
		addr := lis.Addr()
		key, err := newFullAddr(addr.Network(), addr.String())
		if err != nil {
			return nil, err
		}
		store.l[key] = lis
	}
	return store, nil
}
// listen returns the pre-created listener registered for (network, address),
// removing it from storage so each listener is consumed at most once. It
// fails if no matching listener was provided or it was already used.
func (ls *listenerStorage) listen(network, address string) (net.Listener, error) {
	key, err := newFullAddr(network, address)
	if err != nil {
		return nil, err
	}
	lis, found := ls.l[key]
	if !found {
		return nil, errors.Errorf("Listener for \"%s\" was not passed to TestApp or was already used", address)
	}
	delete(ls.l, key)
	return lis, nil
}
// GRPCClient creates a new client which connects to the specified service. It
// immediately fails the test if there is an error, and will also automatically
// close after the test completes.
func GRPCClient(t *testing.T, cfg config.View, service string) *grpc.ClientConn {
	conn, err := rpc.GRPCClientFromConfig(cfg, service)
	if err != nil {
		t.Fatal(err)
	}

	// Close the connection when the test (and its subtests) finish.
	t.Cleanup(func() {
		err := conn.Close()
		if err != nil {
			t.Fatal(err)
		}
	})
	return conn
}

Some files were not shown because too many files have changed in this diff Show More