Compare commits


3 Commits

SHA1 Message Date
4bb8ed2f81 Release 1.0 2020-06-01 13:47:41 -07:00
12ad7d32cd Have 1.0 tutorials go mod files reference 1.0.0-rc.1 2020-06-01 11:45:36 -07:00
5f8febb517 Release 1.0.0-rc.1 2020-05-11 13:43:17 -07:00
227 changed files with 6310 additions and 28552 deletions
.github
Makefile
api
cloudbuild.yaml
cmd/swaggerui
docs
examples
go.mod
go.sum
install/helm/open-match
internal
pkg
third_party
protoc-gen-openapiv2/options
protoc-gen-swagger/options
swaggerui
tools/certgen
tutorials
custom_evaluator
default_evaluator
matchmaker101
matchmaker102

.github/CODEOWNERS (vendored): 1 line changed

@ -1 +0,0 @@
* @laremere @aLekSer @HazWard @calebatwd @syntxerror @sawagh @andrewgrundy

@ -114,6 +114,7 @@ git push origin release-0.5
- [ ] There might be additional references to the old version; be careful not to change them in places that keep the old version for historical purposes.
- [ ] Run `make release`
- [ ] Run `make api/api.md` in the open-match repo to update the auto-generated API references in the open-match-docs repo.
- [ ] Use the files under the `build/release/` directory for the Open Match installation guide. Make sure the artifacts work as expected - these are the artifacts that will be published to the GCS bucket and used in our release assets.
- [ ] Create a PR with the changes, include the release candidate name, and point it to the release branch.
- [ ] Go to [open-match-build](https://pantheon.corp.google.com/cloud-build/triggers?project=open-match-build) and update all *post submit* triggers' `_GCB_LATEST_VERSION` value to the `X.Y` of the release. This value should only increase as it's used to determine the latest stable version.
- [ ] Merge your changes once the PR is approved.
@ -151,7 +152,6 @@ only required once.**
- [ ] Go to the History section and find the "Post Submit" build of the merged commit that's running. Wait for it to go Green. If it's red, fix the error and repeat this section. Take note of the docker image version tag for the next step. Example: 0.5.0-a4706cb.
- [ ] Run `./docs/governance/templates/release.sh {source version tag} {version}` to copy the images to open-match-public-images.
- [ ] If this is a new minor version in the newest major version, then run `./docs/governance/templates/release.sh {source version tag} latest` (see the example invocation after this checklist section).
- [ ] Use the files under the `build/release/` directory for the Open Match installation guide. Make sure the artifacts work as expected - these are the artifacts that will be published to the GCS bucket and used in our release assets.
- [ ] Copy the files from `build/release/` generated from `make release` to the release draft you created. You can drag and drop the files using the Github UI.
- [ ] Update [Slack invitation link](https://slack.com/help/articles/201330256-invite-new-members-to-your-workspace#share-an-invite-link) in [open-match.dev](https://open-match.dev/site/docs/contribute/#get-involved).
- [ ] Test Open Match installation under GKE and Minikube environments using YAML files and Helm. Follow the [First Match](https://development.open-match.dev/site/docs/getting-started/first_match/) guide, run `make proxy-demo`, and open `localhost:51507` to make sure everything works.
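For illustration, a hedged sketch of the image-promotion steps above, using the example source tag from the checklist (0.5.0-a4706cb) and a hypothetical 0.5.0 release version; substitute the real tags for your release:

```sh
# Sketch only: promote the post-submit images to open-match-public-images,
# as described in the checklist items above. Both tags are illustrative.
./docs/governance/templates/release.sh 0.5.0-a4706cb 0.5.0
# If this is a new minor version in the newest major version, also retag as latest:
./docs/governance/templates/release.sh 0.5.0-a4706cb latest
```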
@ -165,7 +165,6 @@ only required once.**
- [ ] Save the release as a draft.
- [ ] Circulate the draft release to active contributors. Where reasonable, get everyone's ok on the release notes before continuing.
- [ ] Publish the [Release](om-release) in Github. This will notify repository watchers.
- [ ] Publish the [Release](om-release) on Open Match [Blog](https://open-match.dev/site/blog/).
## Announce

@ -1,16 +0,0 @@
<!-- Thanks for sending a pull request! Here are some tips for you:
If this is your first time, please read our contributor guidelines: https://github.com/googleforgames/open-match/blob/master/CONTRIBUTING.md and developer guide https://github.com/googleforgames/open-match/blob/master/docs/development.md
-->
**What this PR does / Why we need it**:
**Which issue(s) this PR fixes**:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Closes #<issue number>`, or `Closes (paste link of issue)`.
-->
Closes #
**Special notes for your reviewer**:

Makefile: 186 lines changed

@ -15,45 +15,44 @@
## Open Match Make Help
## ====================
##
## # Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
## Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
## make activate-gcp-apis
## make create-gke-cluster push-helm
##
## # Create a Minikube Cluster (requires VirtualBox)
## Create a Minikube Cluster (requires VirtualBox)
## make create-mini-cluster push-helm
##
## # Create a KinD Cluster (Follow instructions to run command before pushing helm.)
## Create a KinD Cluster (Follow instructions to run command before pushing helm.)
## make create-kind-cluster get-kind-kubeconfig
##
## # Finish KinD setup by installing helm:
## Finish KinD setup by installing helm:
## make push-helm
##
## # Deploy Open Match
## Deploy Open Match
## make push-images -j$(nproc)
## make install-chart
##
## # Build and Test
## Build and Test
## make all -j$(nproc)
## make test
##
## # Access telemetry
## Access telemetry
## make proxy-prometheus
## make proxy-grafana
## make proxy-ui
##
## # Teardown
## Teardown
## make delete-mini-cluster
## make delete-gke-cluster
## make delete-kind-cluster && export KUBECONFIG=""
##
## # Prepare a Pull Request
## Prepare a Pull Request
## make presubmit
##
# If you want information on how to edit this file, check out
# http://makefiletutorial.com/
BASE_VERSION = 1.3.0
BASE_VERSION = 1.0.0
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
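As a hedged illustration of how the version tag is assembled from these variables (using the Release 1.0 commit 4bb8ed2f81 listed at the top of this compare as an example HEAD):

```sh
# Sketch only: evaluate the Makefile's version variables by hand.
BASE_VERSION=1.0.0                                             # from the Makefile on this branch
SHORT_SHA=$(git rev-parse --short=7 HEAD | tr -d '[:punct:]')  # e.g. 4bb8ed2
VERSION="${BASE_VERSION}-${SHORT_SHA}"                         # e.g. 1.0.0-4bb8ed2
echo "${VERSION}"
```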
@ -69,7 +68,7 @@ GOLANGCI_VERSION = 1.18.0
KIND_VERSION = 0.5.1
SWAGGERUI_VERSION = 3.24.2
GOOGLE_APIS_VERSION = aba342359b6743353195ca53f944fe71e6fb6cd4
GRPC_GATEWAY_VERSION = 2.3.0
GRPC_GATEWAY_VERSION = 1.14.3
TERRAFORM_VERSION = 0.12.13
CHART_TESTING_VERSION = 2.4.0
@ -124,7 +123,7 @@ GCLOUD = gcloud --quiet
OPEN_MATCH_HELM_NAME = open-match
OPEN_MATCH_KUBERNETES_NAMESPACE = open-match
OPEN_MATCH_SECRETS_DIR = $(REPOSITORY_ROOT)/install/helm/open-match/secrets
GCLOUD_ACCOUNT_EMAIL = $(shell gcloud auth list --format yaml | grep ACTIVE -a2 | grep account: | cut -c 10-)
GCLOUD_ACCOUNT_EMAIL = $(shell gcloud auth list --format yaml | grep account: | cut -c 10-)
_GCB_POST_SUBMIT ?= 0
# Latest version triggers builds of :latest images.
_GCB_LATEST_VERSION ?= undefined
@ -188,7 +187,7 @@ else
endif
endif
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go internal/ipb/messages.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
SWAGGER_JSON_DOCS = api/frontend.swagger.json api/backend.swagger.json api/query.swagger.json api/matchfunction.swagger.json api/evaluator.swagger.json
@ -198,7 +197,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
CMDS = $(notdir $(wildcard cmd/*))
# Names of the individual images, omitting the openmatch prefix.
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-backfill base-build
IMAGES = $(CMDS) mmf-go-soloduel base-build
help:
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
@ -210,18 +209,14 @@ local-cloud-build: gcloud
################################################################################
## #############################################################################
## Image commands:
## These commands are auto-generated based on a complete list of images.
## All folders in cmd/ are turned into an image using Dockerfile.cmd.
## Additional images are specified by the IMAGES variable.
## Image commands omit the "openmatch-" prefix on the image name and tags.
## These commands are auto-generated based on a complete list of images. All
## folders in cmd/ are turned into an image using Dockerfile.cmd. Additional
## images are specified by the IMAGES variable. Image commands omit the
## "openmatch-" prefix on the image name and tags.
##
list-images:
@echo $(IMAGES)
#######################################
## # Builds images locally
## build-images / build-<image name>-image
## build-images / build-<image name>-image: builds images locally
##
build-images: $(foreach IMAGE,$(IMAGES),build-$(IMAGE)-image)
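For example (a hedged sketch of the per-image targets described above; cmd/swaggerui appears in the changed-files list of this compare, so its image is used for illustration):

```sh
# Sketch only: per-image targets drop the "openmatch-" prefix, so the image
# built from cmd/swaggerui is addressed as "swaggerui" in the target name.
make list-images                # print everything in $(IMAGES)
make build-swaggerui-image      # build openmatch-swaggerui locally
make push-swaggerui-image       # build and push it to your container registry
```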
@ -242,12 +237,9 @@ $(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-buil
build-mmf-go-soloduel-image: docker build-base-build-image
docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
build-mmf-go-backfill-image: docker build-base-build-image
docker build -f examples/functions/golang/backfill/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-backfill:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-backfill:$(ALTERNATE_TAG) .
#######################################
## # Builds and pushes images to your container registry.
## push-images / push-<image name>-image
## push-images / push-<image name>-image: builds and pushes images to your
## container registry.
##
push-images: $(foreach IMAGE,$(IMAGES),push-$(IMAGE)-image)
@ -266,9 +258,8 @@ endif
endif
#######################################
## # Publishes images on the public container registry.
## # Used for publishing releases.
## retag-images / retag-<image name>-image
## retag-images / retag-<image name>-image: publishes images on the public
## container registry. Used for publishing releases.
##
retag-images: $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image)
@ -281,8 +272,7 @@ $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image): retag-%-image: docker
docker push $(TARGET_REGISTRY)/openmatch-$*:$(TAG)
#######################################
## # Removes images from local docker
## clean-images / clean-<image name>-image
## clean-images / clean-<image name>-image: removes images from local docker
##
clean-images: docker $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image)
-docker rmi -f open-match-base-build
@ -292,7 +282,7 @@ $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image): clean-%-image:
#####################################################################################################################
update-chart-deps: build/toolchain/bin/helm$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://charts.helm.sh/incubator; $(HELM) repo add bitnami https://charts.bitnami.com/bitnami;$(HELM) dependency update)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com; $(HELM) dependency update)
lint-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/ct$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm; $(HELM) lint $(OPEN_MATCH_HELM_NAME))
@ -305,8 +295,8 @@ build/chart/open-match-$(BASE_VERSION).tgz: build/toolchain/bin/helm$(EXE_EXTENS
build/chart/index.yaml: build/toolchain/bin/helm$(EXE_EXTENSION) gcloud build/chart/open-match-$(BASE_VERSION).tgz
mkdir -p $(BUILD_DIR)/chart-index/
-gsutil cp $(_CHARTS_BUCKET)/chart/index.yaml $(BUILD_DIR)/chart-index/
-gsutil -m cp $(_CHARTS_BUCKET)/chart/open-match-* $(BUILD_DIR)/chart-index/
-gsutil cp gs://open-match-chart/chart/index.yaml $(BUILD_DIR)/chart-index/
-gsutil -m cp gs://open-match-chart/chart/open-match-* $(BUILD_DIR)/chart-index/
$(HELM) repo index $(BUILD_DIR)/chart-index/
$(HELM) repo index --merge $(BUILD_DIR)/chart-index/index.yaml $(BUILD_DIR)/chart/
@ -320,7 +310,7 @@ install-chart-prerequisite: build/toolchain/bin/kubectl$(EXE_EXTENSION) update-c
$(KUBECTL) apply -f install/gke-metadata-server-workaround.yaml
# Used for Open Match development. Install om-configmap-override.yaml by default.
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD) --set redis.usePassword=false --set redis.sentinel.usePassword=false
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD)
# Used for generate static yamls. Install om-configmap-override.yaml as needed.
HELM_TEMPLATE_FLAGS = --no-hooks --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --set usingHelmTemplate=true
HELM_IMAGE_FLAGS = --set global.image.registry=$(REGISTRY) --set global.image.tag=$(TAG)
@ -364,22 +354,19 @@ install-scale-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EX
--set open-match-core.redis.enabled=false \
--set global.telemetry.prometheus.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set global.kubernetes.serviceAccount=$(OPEN_MATCH_HELM_NAME)-unprivileged-service \
--set open-match-scale.enabled=true \
--set open-match-scale.configs.default.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-default\" \}\}" \
--set open-match-scale.configs.override.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-override\" \}\}" | $(KUBECTL) apply -f -
--set open-match-scale.enabled=true | $(KUBECTL) apply -f -
# install-ci-chart will install open-match-core with pool based mmf for end-to-end in-cluster test.
install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTENSION) install/helm/open-match/secrets/
$(HELM) upgrade $(OPEN_MATCH_HELM_NAME) $(HELM_UPGRADE_FLAGS) --atomic install/helm/open-match $(HELM_IMAGE_FLAGS) \
--set query.replicas=1,frontend.replicas=1,backend.replicas=1 \
--set evaluator.hostName=open-match-test \
--set evaluator.hostName=test \
--set evaluator.grpcPort=50509 \
--set evaluator.httpPort=51509 \
--set open-match-core.registrationInterval=200ms \
--set open-match-core.proposalCollectionInterval=200ms \
--set open-match-core.assignedDeleteTimeout=200ms \
--set open-match-core.pendingReleaseTimeout=1s \
--set open-match-core.pendingReleaseTimeout=200ms \
--set open-match-core.queryPageSize=10 \
--set global.gcpProjectId=intentionally-invalid-value \
--set redis.master.resources.requests.cpu=0.6,redis.master.resources.requests.memory=300Mi \
@ -399,12 +386,9 @@ install/yaml/: TAG = $(BASE_VERSION)
endif
install/yaml/: update-chart-deps install/yaml/install.yaml install/yaml/01-open-match-core.yaml install/yaml/02-open-match-demo.yaml install/yaml/03-prometheus-chart.yaml install/yaml/04-grafana-chart.yaml install/yaml/05-jaeger-chart.yaml install/yaml/06-open-match-override-configmap.yaml install/yaml/07-open-match-default-evaluator.yaml
# We have to hard-code the Jaeger endpoints as we are excluding Jaeger, so Helm cannot determine the endpoints from the Jaeger subchart
install/yaml/01-open-match-core.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
--set-string global.telemetry.jaeger.agentEndpoint="$(OPEN_MATCH_HELM_NAME)-jaeger-agent:6831" \
--set-string global.telemetry.jaeger.collectorEndpoint="http://$(OPEN_MATCH_HELM_NAME)-jaeger-collector:14268/api/traces" \
install/helm/open-match > install/yaml/01-open-match-core.yaml
install/yaml/02-open-match-demo.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@ -422,7 +406,6 @@ install/yaml/03-prometheus-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set global.telemetry.prometheus.enabled=true \
install/helm/open-match > install/yaml/03-prometheus-chart.yaml
# We have to hard-code the Prometheus Server URL as we are excluding Prometheus, so Helm cannot determine the URL from the Prometheus subchart
install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
@ -430,7 +413,6 @@ install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set open-match-core.redis.enabled=false \
--set open-match-telemetry.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set-string global.telemetry.grafana.prometheusServer="http://$(OPEN_MATCH_HELM_NAME)-prometheus-server.$(OPEN_MATCH_KUBERNETES_NAMESPACE).svc.cluster.local:80/" \
install/helm/open-match > install/yaml/04-grafana-chart.yaml
install/yaml/05-jaeger-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@ -477,29 +459,11 @@ set-redis-password:
read REDIS_PASSWORD; \
stty echo; \
printf "\n"; \
$(KUBECTL) create secret generic open-match-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
## ####################################
## # Tool installation helpers
##
$(KUBECTL) create secret generic om-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
## # Install toolchain. Short for installing K8s, protoc and OpenMatch tools.
## make install-toolchain
##
install-toolchain: install-kubernetes-tools install-protoc-tools install-openmatch-tools
## # Install Kubernetes tools
## make install-kubernetes-tools
##
install-kubernetes-tools: build/toolchain/bin/kubectl$(EXE_EXTENSION) build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/minikube$(EXE_EXTENSION) build/toolchain/bin/terraform$(EXE_EXTENSION)
## # Install protoc tools
## make install-protoc-tools
##
install-protoc-tools: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION)
## # Install OpenMatch tools
## make install-openmatch-tools
##
install-protoc-tools: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION)
install-openmatch-tools: build/toolchain/bin/certgen$(EXE_EXTENSION) build/toolchain/bin/reaper$(EXE_EXTENSION)
build/toolchain/bin/helm$(EXE_EXTENSION):
@ -571,11 +535,11 @@ build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION):
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/golang/protobuf/protoc-gen-go
build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION):
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION):
build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
build/toolchain/bin/certgen$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
@ -633,21 +597,16 @@ get-kind-kubeconfig: build/toolchain/bin/kind$(EXE_EXTENSION)
delete-kind-cluster: build/toolchain/bin/kind$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
-$(KIND) delete cluster
create-cluster-role-binding:
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
create-gke-cluster: GKE_VERSION = 1.20.8-gke.900 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-8 --enable-autoscaling --min-nodes 1 --num-nodes 6 --max-nodes 10 --disk-size 50
create-gke-cluster: GKE_VERSION = 1.14.10-gke.32 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-4 --enable-autoscaling --min-nodes 1 --num-nodes 2 --max-nodes 10 --disk-size 50
create-gke-cluster: GKE_FUTURE_COMPAT_FLAGS = --no-enable-basic-auth --no-issue-client-certificate --enable-ip-alias --metadata disable-legacy-endpoints=true --enable-autoupgrade
create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
$(GCLOUD) beta $(GCP_PROJECT_FLAG) container clusters create $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GKE_CLUSTER_SHAPE_FLAGS) $(GKE_FUTURE_COMPAT_FLAGS) $(GKE_CLUSTER_FLAGS) \
--enable-pod-security-policy \
--cluster-version $(GKE_VERSION) \
--image-type cos_containerd \
--tags open-match \
--workload-pool $(PROJECT_ID).svc.id.goog
$(MAKE) create-cluster-role-binding
--tags open-match
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
delete-gke-cluster: gcloud
-$(GCLOUD) $(GCP_PROJECT_FLAG) container clusters delete $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GCLOUD_EXTRA_FLAGS)
@ -661,19 +620,12 @@ delete-mini-cluster: build/toolchain/bin/minikube$(EXE_EXTENSION)
gcp-apply-binauthz-policy: build/policies/binauthz.yaml
$(GCLOUD) beta $(GCP_PROJECT_FLAG) container binauthz policy import build/policies/binauthz.yaml
## ####################################
## # Protobuf
##
## # Build all protobuf definitions.
## make all-protos
##
all-protos: $(ALL_PROTOS)
# The proto generator really wants to be run from the $GOPATH root, and doesn't
# support methods for directing it to the correct location that's not the proto
# file's location.
# So, instead, put it in a temporary directory, then move it out.
# file's location. So instead put it in a temporary directory, then move it
# out.
pkg/pb/%.pb.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
mkdir -p $(REPOSITORY_ROOT)/build/prototmp $(REPOSITORY_ROOT)/pkg/pb
$(PROTOC) $< \
@ -695,24 +647,25 @@ pkg/pb/%.pb.gw.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTE
--grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)/build/prototmp
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/$@ $@
api/%.swagger.json: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION)
api/%.swagger.json: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION)
$(PROTOC) $< \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--openapiv2_out=json_names_for_fields=false,logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)
--swagger_out=logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)
## # Build API reference in markdown. Needs open-match-docs repo at the same level as this one.
## make api/api.md
##
api/api.md: third_party/ build/toolchain/bin/protoc-gen-doc$(EXE_EXTENSION)
$(PROTOC) api/*.proto \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--doc_out=. \
--doc_opt=markdown,api_temp.md
--doc_opt=markdown,api.md
# Crazy hack that inserts a hugo link reference into this API doc :-)
cat ./docs/hugo_apiheader.txt ./api_temp.md >> api.md
mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
rm ./api_temp.md
$(SED_REPLACE) '1 i\---\
title: "Open Match API References" \
linkTitle: "Open Match API References" \
weight: 2 \
description: \
This document provides API references for Open Match services. \
--- \
' ./api.md && mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
# The include structure of the protos needs to be called out so the dependency chain is run through properly.
pkg/pb/backend.pb.go: pkg/pb/messages.pb.go
@ -721,15 +674,7 @@ pkg/pb/matchfunction.pb.go: pkg/pb/messages.pb.go
pkg/pb/query.pb.go: pkg/pb/messages.pb.go
pkg/pb/evaluator.pb.go: pkg/pb/messages.pb.go
internal/ipb/synchronizer.pb.go: pkg/pb/messages.pb.go
internal/ipb/messages.pb.go: pkg/pb/messages.pb.go
## ####################################
## # Go tasks
##
## # Build assets and binaries
## make build
##
build: assets
$(GO) build ./...
$(GO) build -tags e2ecluster ./...
@ -751,15 +696,9 @@ define fast_test_folder
$(foreach dir, $(wildcard $(1)/*/.), $(call fast_test_folder, $(dir)))
endef
## # Run go tests
## make test
##
test: $(ALL_PROTOS) tls-certs third_party/
$(call test_folder,.)
## # Run go tests more quickly, but with worse flake and race detection
## make fasttest
##
fasttest: $(ALL_PROTOS) tls-certs third_party/
$(call fast_test_folder,.)
@ -776,9 +715,6 @@ vet:
golangci: build/toolchain/bin/golangci-lint$(EXE_EXTENSION)
GO111MODULE=on $(GOLANGCI) run --config=$(REPOSITORY_ROOT)/.golangci.yaml
## # Run linter on Go code, charts and terraform
## make lint
##
lint: fmt vet golangci lint-chart terraform-lint
assets: $(ALL_PROTOS) tls-certs third_party/ build/chart/
@ -849,13 +785,13 @@ md-test: docker
ci-deploy-artifacts: install/yaml/ $(SWAGGER_JSON_DOCS) build/chart/ gcloud
ifeq ($(_GCB_POST_SUBMIT),1)
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* $(_CHARTS_BUCKET)/install/v$(BASE_VERSION)/yaml/
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json $(_CHARTS_BUCKET)/api/v$(BASE_VERSION)/
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* gs://open-match-chart/install/v$(BASE_VERSION)/yaml/
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json gs://open-match-chart/api/v$(BASE_VERSION)/
# Deploy Helm Chart
# Since each build will refresh just its version we can allow this for every post submit.
# Copy the files into multiple locations to keep a backup.
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/by-hash/$(VERSION)/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/by-hash/$(VERSION)/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/
else
@echo "Not deploying build artifacts to open-match.dev because this is not a post commit change."
endif
@ -984,7 +920,7 @@ proxy:
update-deps:
$(GO) mod tidy
third_party/: third_party/google/api third_party/protoc-gen-openapiv2/options third_party/swaggerui/
third_party/: third_party/google/api third_party/protoc-gen-swagger/options third_party/swaggerui/
third_party/google/api:
mkdir -p $(TOOLCHAIN_DIR)/googleapis-temp/
@ -996,12 +932,12 @@ third_party/google/api:
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
rm -rf $(TOOLCHAIN_DIR)/googleapis-temp
third_party/protoc-gen-openapiv2/options:
third_party/protoc-gen-swagger/options:
mkdir -p $(TOOLCHAIN_DIR)/grpc-gateway-temp/
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-openapiv2/options
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/v$(GRPC_GATEWAY_VERSION).zip
(cd $(TOOLCHAIN_DIR)/grpc-gateway-temp/; unzip -q -o grpc-gateway.zip)
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-openapiv2/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-openapiv2/options/
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
rm -rf $(TOOLCHAIN_DIR)/grpc-gateway-temp
third_party/swaggerui/:

@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
info: {
title: "Backend"
version: "1.0"
@ -93,7 +93,7 @@ message ReleaseAllTicketsRequest{}
message ReleaseAllTicketsResponse {}
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
message AssignmentGroup {
message AssignmentGroup{
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
repeated string ticket_ids = 1;
@ -146,6 +146,7 @@ service BackendService {
// ReleaseTickets moves tickets from the pending state, to the active state.
// This enables them to be returned by query, and find different matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseTickets(ReleaseTicketsRequest) returns (ReleaseTicketsResponse) {
@ -158,6 +159,7 @@ service BackendService {
// ReleaseAllTickets moves all tickets from the pending state, to the active
// state. This enables them to be returned by query, and find different
// matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseAllTickets(ReleaseAllTicketsRequest) returns (ReleaseAllTicketsResponse) {
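To make the release flow described above concrete, here is a hedged sketch of calling ReleaseAllTickets through its grpc-gateway HTTP mapping (the /v1/backendservice/tickets:releaseall path that appears in the swagger diff further down); the backend host and port are hypothetical and depend on your deployment:

```sh
# Sketch only: release all pending tickets back to the active pool via HTTP.
# BACKEND_HOST is a placeholder; point it at your om-backend HTTP endpoint.
BACKEND_HOST=localhost:51505
curl -s -X POST "http://${BACKEND_HOST}/v1/backendservice/tickets:releaseall" \
  -H "Content-Type: application/json" \
  -d '{}'
```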

@ -13,11 +13,6 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "BackendService"
}
],
"schemes": [
"http",
"https"
@ -32,21 +27,12 @@
"/v1/backendservice/matches:fetch": {
"post": {
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and\nreturns a set of matches generated by the Match Making Function, and\naccepted by the evaluator.\nTickets in matches returned by FetchMatches are moved from active to\npending, and will not be returned by query.",
"operationId": "BackendService_FetchMatches",
"operationId": "FetchMatches",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchFetchMatchesResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchFetchMatchesResponse"
"$ref": "#/x-stream-definitions/openmatchFetchMatchesResponse"
}
},
"404": {
@ -55,12 +41,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -81,7 +61,7 @@
"/v1/backendservice/tickets:assign": {
"post": {
"summary": "AssignTickets overwrites the Assignment field of the input TicketIds.",
"operationId": "BackendService_AssignTickets",
"operationId": "AssignTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -95,12 +75,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -120,8 +94,9 @@
},
"/v1/backendservice/tickets:release": {
"post": {
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "BackendService_ReleaseTickets",
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -135,12 +110,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -160,8 +129,9 @@
},
"/v1/backendservice/tickets:releaseall": {
"post": {
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "BackendService_ReleaseAllTickets",
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseAllTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -175,12 +145,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -208,17 +172,6 @@
],
"default": "UNKNOWN"
},
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignTicketsRequest": {
"type": "object",
"properties": {
@ -289,37 +242,6 @@
},
"description": "AssignmentGroup contains an Assignment and the Tickets to which it should be applied."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -336,10 +258,6 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -418,14 +336,6 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -609,27 +519,44 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"rpcStatus": {
"runtimeStreamError": {
"type": "object",
"properties": {
"code": {
"grpc_code": {
"type": "integer",
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
"type": "string"
},
"http_status": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
}
}
}
},
"x-stream-definitions": {
"openmatchFetchMatchesResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchFetchMatchesResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
"title": "Stream result of openmatchFetchMatchesResponse"
}
},
"externalDocs": {

@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
info: {
title: "Evaluator"
version: "1.0"
@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
};
message EvaluateRequest {

@ -13,11 +13,6 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "Evaluator"
}
],
"schemes": [
"http",
"https"
@ -32,21 +27,12 @@
"/v1/evaluator/matches:evaluate": {
"post": {
"summary": "Evaluate evaluates a list of proposed matches based on quality, collision status, and etc, then shortlist the matches and returns the final results.",
"operationId": "Evaluator_Evaluate",
"operationId": "Evaluate",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchEvaluateResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchEvaluateResponse"
"$ref": "#/x-stream-definitions/openmatchEvaluateResponse"
}
},
"404": {
@ -55,12 +41,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -98,37 +78,6 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchEvaluateRequest": {
"type": "object",
"properties": {
@ -175,14 +124,6 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -260,27 +201,44 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"rpcStatus": {
"runtimeStreamError": {
"type": "object",
"properties": {
"code": {
"grpc_code": {
"type": "integer",
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
"type": "string"
},
"http_status": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
}
}
}
},
"x-stream-definitions": {
"openmatchEvaluateResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchEvaluateResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
"title": "Stream result of openmatchEvaluateResponse"
}
},
"externalDocs": {

@ -19,10 +19,10 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "google/protobuf/empty.proto";
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
info: {
title: "Frontend"
version: "1.0"
@ -53,7 +53,7 @@ option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
};
message CreateTicketRequest {
@ -81,57 +81,6 @@ message WatchAssignmentsResponse {
Assignment assignment = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message AcknowledgeBackfillRequest {
// An existing ID of Backfill to acknowledge.
string backfill_id = 1;
// An updated Assignment of the requested Backfill.
Assignment assignment = 2;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message AcknowledgeBackfillResponse {
// The Backfill that was acknowledged.
Backfill backfill = 1;
// All of the Tickets that were successfully assigned
repeated Ticket tickets = 2;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message CreateBackfillRequest {
// An empty Backfill object.
Backfill backfill = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message DeleteBackfillRequest {
// An existing ID of Backfill to delete.
string backfill_id = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message GetBackfillRequest {
// An existing ID of Backfill to retrieve.
string backfill_id = 1;
}
// UpdateBackfillRequest - update searchFields, extensions and set assignment.
//
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message UpdateBackfillRequest {
// A Backfill object with ID set and fields to update.
Backfill backfill = 1;
}
// The FrontendService implements APIs to manage and query status of a Tickets.
service FrontendService {
// CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.
@ -168,55 +117,4 @@ service FrontendService {
get: "/v1/frontendservice/tickets/{ticket_id}/assignments"
};
}
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info
// This triggers an assignment process.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc AcknowledgeBackfill(AcknowledgeBackfillRequest) returns (AcknowledgeBackfillResponse) {
option (google.api.http) = {
post: "/v1/frontendservice/backfills/{backfill_id}/acknowledge"
body: "*"
};
}
// CreateBackfill creates a new Backfill object.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc CreateBackfill(CreateBackfillRequest) returns (Backfill) {
option (google.api.http) = {
post: "/v1/frontendservice/backfills"
body: "*"
};
}
// DeleteBackfill receives a backfill ID and deletes its resource.
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc DeleteBackfill(DeleteBackfillRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1/frontendservice/backfills/{backfill_id}"
};
}
// GetBackfill returns a backfill object by its ID.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc GetBackfill(GetBackfillRequest) returns (Backfill) {
option (google.api.http) = {
get: "/v1/frontendservice/backfills/{backfill_id}"
};
}
// UpdateBackfill updates search_fields and extensions for the backfill with the provided id.
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc UpdateBackfill(UpdateBackfillRequest) returns (Backfill) {
option (google.api.http) = {
patch: "/v1/frontendservice/backfills"
body: "*"
};
}
}
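As a companion to the FrontendService definition above, a hedged sketch of creating a ticket through the gateway's /v1/frontendservice/tickets route (shown later in the swagger diff); the host, port, and the exact request field names are assumptions based on the Ticket and SearchFields messages, which are not part of this diff:

```sh
# Sketch only: create a ticket with a couple of illustrative search fields.
# FRONTEND_HOST is a placeholder; point it at your om-frontend HTTP endpoint.
FRONTEND_HOST=localhost:51504
curl -s -X POST "http://${FRONTEND_HOST}/v1/frontendservice/tickets" \
  -H "Content-Type: application/json" \
  -d '{"ticket": {"search_fields": {"tags": ["beta"], "double_args": {"mmr": 1500}}}}'
```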

@ -13,11 +13,6 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "FrontendService"
}
],
"schemes": [
"http",
"https"
@ -29,211 +24,10 @@
"application/json"
],
"paths": {
"/v1/frontendservice/backfills": {
"post": {
"summary": "CreateBackfill creates a new Backfill object.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_CreateBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchCreateBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
},
"patch": {
"summary": "UpdateBackfill updates search_fields and extensions for the backfill with the provided id.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_UpdateBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchUpdateBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/backfills/{backfill_id}": {
"get": {
"summary": "GetBackfill returns a backfill object by its ID.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_GetBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "backfill_id",
"description": "An existing ID of Backfill to retrieve.",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"FrontendService"
]
},
"delete": {
"summary": "DeleteBackfill receives a backfill ID and deletes its resource.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_DeleteBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"properties": {}
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "backfill_id",
"description": "An existing ID of Backfill to delete.",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/backfills/{backfill_id}/acknowledge": {
"post": {
"summary": "AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info\nThis triggers an assignment process.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_AcknowledgeBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchAcknowledgeBackfillResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "backfill_id",
"description": "An existing ID of Backfill to acknowledge.",
"in": "path",
"required": true,
"type": "string"
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchAcknowledgeBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/tickets": {
"post": {
"summary": "CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.\nA ticket is considered as ready for matchmaking once it is created.\n - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.\n - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.",
"operationId": "FrontendService_CreateTicket",
"operationId": "CreateTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -247,12 +41,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -273,7 +61,7 @@
"/v1/frontendservice/tickets/{ticket_id}": {
"get": {
"summary": "GetTicket get the Ticket associated with the specified TicketId.",
"operationId": "FrontendService_GetTicket",
"operationId": "GetTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -287,12 +75,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -310,7 +92,7 @@
},
"delete": {
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client should delete the Ticket when finished matchmaking with it.",
"operationId": "FrontendService_DeleteTicket",
"operationId": "DeleteTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -324,12 +106,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -349,21 +125,12 @@
"/v1/frontendservice/tickets/{ticket_id}/assignments": {
"get": {
"summary": "WatchAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "FrontendService_WatchAssignments",
"operationId": "WatchAssignments",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchWatchAssignmentsResponse"
"$ref": "#/x-stream-definitions/openmatchWatchAssignmentsResponse"
}
},
"404": {
@ -372,12 +139,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -396,37 +157,6 @@
}
},
"definitions": {
"openmatchAcknowledgeBackfillRequest": {
"type": "object",
"properties": {
"backfill_id": {
"type": "string",
"description": "An existing ID of Backfill to acknowledge."
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Backfill."
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchAcknowledgeBackfillResponse": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "The Backfill that was acknowledged."
},
"tickets": {
"type": "array",
"items": {
"$ref": "#/definitions/openmatchTicket"
},
"title": "All of the Tickets that were successfully assigned"
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -444,47 +174,6 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchCreateBackfillRequest": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "An empty Backfill object."
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchCreateTicketRequest": {
"type": "object",
"properties": {
@ -552,16 +241,6 @@
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"openmatchUpdateBackfillRequest": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "A Backfill object with ID set and fields to update."
}
},
"description": "UpdateBackfillRequest - update searchFields, extensions and set assignment.\n\nBETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
@ -586,27 +265,44 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"rpcStatus": {
"runtimeStreamError": {
"type": "object",
"properties": {
"code": {
"grpc_code": {
"type": "integer",
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
"type": "string"
},
"http_status": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
}
}
}
},
"x-stream-definitions": {
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
"title": "Stream result of openmatchWatchAssignmentsResponse"
}
},
"externalDocs": {

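The frontend swagger above models WatchAssignments as a streaming endpoint whose chunks wrap each message in a result/error envelope. Over gRPC the same call is a plain server stream. A hedged Go sketch of waiting for an assignment follows; the service address and generated field names are assumptions based on the default install and the generated client.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

// watchAssignment blocks until Open Match publishes an Assignment with a
// connection string for the ticket, mirroring the streaming
// /v1/frontendservice/tickets/{ticket_id}/assignments endpoint above.
func watchAssignment(ctx context.Context, fe pb.FrontendServiceClient, ticketID string) (*pb.Assignment, error) {
	stream, err := fe.WatchAssignments(ctx, &pb.WatchAssignmentsRequest{TicketId: ticketID})
	if err != nil {
		return nil, err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil, fmt.Errorf("stream closed before an assignment arrived")
		}
		if err != nil {
			return nil, err
		}
		if a := resp.GetAssignment(); a.GetConnection() != "" {
			return a, nil
		}
	}
}

func main() {
	conn, err := grpc.Dial("open-match-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	a, err := watchAssignment(context.Background(), pb.NewFrontendServiceClient(conn), "ticket-id")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connect to:", a.GetConnection())
}
```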
@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
info: {
title: "Match Function"
version: "1.0"
@ -69,9 +69,8 @@ message RunResponse {
// The MatchFunction service implements APIs to run user-defined matchmaking logics.
service MatchFunction {
// DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.
// Run pulls Tickets that satisfy Profile constraints from QueryService,
// runs matchmaking logic against them, then constructs and streams back
// match candidates to the Backend service.
// Run pulls Tickets that satisify Profile constraints from QueryService, runs matchmaking logics against them, then
// constructs and streams back match candidates to the Backend service.
rpc Run(RunRequest) returns (stream RunResponse) {
option (google.api.http) = {
post: "/v1/matchfunction:run"

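As the comment says, Run is invoked by backend.FetchMatches rather than called directly. A rough sketch of a Run implementation, modeled on the example match functions elsewhere in this repository; the pairing logic is illustrative only, and the `matchFunctionService` struct is assumed to hold a QueryService client as in those examples.

```go
package mmf

import (
	"fmt"
	"log"
	"time"

	"open-match.dev/open-match/pkg/matchfunction"
	"open-match.dev/open-match/pkg/pb"
)

// matchFunctionService holds the QueryService client used to fetch tickets.
type matchFunctionService struct {
	queryServiceClient pb.QueryServiceClient
}

// Run queries each pool in the profile and streams one naive two-player
// proposal per pair of tickets back to the backend.
func (s *matchFunctionService) Run(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
	profile := req.GetProfile()
	for _, pool := range profile.GetPools() {
		tickets, err := matchfunction.QueryPool(stream.Context(), s.queryServiceClient, pool)
		if err != nil {
			log.Printf("QueryPool failed: %v", err)
			return err
		}
		for i := 0; i+1 < len(tickets); i += 2 {
			proposal := &pb.Match{
				MatchId:       fmt.Sprintf("%s-%s-%d", profile.GetName(), time.Now().Format("20060102150405"), i/2),
				MatchProfile:  profile.GetName(),
				MatchFunction: "pairs",
				Tickets:       tickets[i : i+2],
			}
			if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
				return err
			}
		}
	}
	return nil
}
```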
@ -13,11 +13,6 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "MatchFunction"
}
],
"schemes": [
"http",
"https"
@ -31,22 +26,13 @@
"paths": {
"/v1/matchfunction:run": {
"post": {
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisfy Profile constraints from QueryService,\nruns matchmaking logic against them, then constructs and streams back\nmatch candidates to the Backend service.",
"operationId": "MatchFunction_Run",
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisify Profile constraints from QueryService, runs matchmaking logics against them, then\nconstructs and streams back match candidates to the Backend service.",
"operationId": "Run",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchRunResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchRunResponse"
"$ref": "#/x-stream-definitions/openmatchRunResponse"
}
},
"404": {
@ -55,12 +41,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -80,17 +60,6 @@
}
},
"definitions": {
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -108,37 +77,6 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -155,10 +93,6 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -191,14 +125,6 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -379,27 +305,44 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"rpcStatus": {
"runtimeStreamError": {
"type": "object",
"properties": {
"code": {
"grpc_code": {
"type": "integer",
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
"type": "string"
},
"http_status": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
}
}
}
},
"x-stream-definitions": {
"openmatchRunResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchRunResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
"title": "Stream result of openmatchRunResponse"
}
},
"externalDocs": {

@ -103,25 +103,6 @@ message DoubleRangeFilter {
// Minimum value.
double min = 3;
enum Exclude {
// No bounds should be excluded when evaluating the filter, i.e.: MIN <= x <= MAX
NONE = 0;
// Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN < x <= MAX
MIN = 1;
// Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN <= x < MAX
MAX = 2;
// Both bounds should be excluded when evaluating the filter, i.e.: MIN < x < MAX
BOTH = 3;
}
// Defines the bounds to apply when filtering tickets by their search_fields.double_args value.
// BETA FEATURE WARNING: This field and the associated values are
// not finalized and still subject to possible change or removal.
Exclude exclude = 4;
}
// Filters strings exactly equaling a value.
@ -220,45 +201,6 @@ message Match {
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 7;
// Backfill request which contains additional information to the match
// and contains an association to a GameServer.
// BETA FEATURE WARNING: This field is not finalized and still subject
// to possible change or removal.
Backfill backfill = 8;
// AllocateGameServer signalise Director that Backfill is new and it should
// allocate a GameServer, this Backfill would be assigned.
// BETA FEATURE WARNING: This field is not finalized and still subject
// to possible change or removal.
bool allocate_gameserver = 9;
// Deprecated fields.
reserved 5, 6;
}
// Represents a backfill entity which is used to fill partially full matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
message Backfill {
// Id represents an auto-generated Id issued by Open Match.
string id = 1;
// Search fields are the fields which Open Match is aware of, and can be used
// when specifying filters.
SearchFields search_fields = 2;
// Customized information not inspected by Open Match, to be used by
// the Match Function, evaluator, and components making calls to Open Match.
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 3;
// Create time is the time the Ticket was created. It is populated by Open
// Match at the time of Ticket creation.
google.protobuf.Timestamp create_time = 4;
// Generation gets incremented on GameServers update operations.
// Prevents the MMF from overriding a newer version from the game server.
// Do NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs.
int64 generation = 5;
}
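
With the Exclude enum dropped for 1.0, a DoubleRangeFilter always matches the inclusive range MIN <= x <= MAX. A small sketch of building a Pool with the filters defined above, using the generated Go types; the field values are made up for illustration.

```go
package main

import (
	"fmt"

	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// A Pool is the unit both QueryTickets and MatchProfiles operate on:
	// a ticket matches the pool only if it satisfies every filter.
	pool := &pb.Pool{
		Name: "competitive-emea",
		DoubleRangeFilters: []*pb.DoubleRangeFilter{
			{DoubleArg: "skill", Min: 5, Max: 10}, // inclusive bounds in 1.0
		},
		StringEqualsFilters: []*pb.StringEqualsFilter{
			{StringArg: "region", Value: "emea"},
		},
		TagPresentFilters: []*pb.TagPresentFilter{
			{Tag: "mode.ctf"},
		},
	}
	fmt.Println(pool)
}
```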

@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
info: {
title: "MM Logic (Data Layer)"
version: "1.0"
@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
};
message QueryTicketsRequest {
@ -75,26 +75,12 @@ message QueryTicketIdsResponse {
repeated string ids = 1;
}
// BETA FEATURE WARNING: This Request messages are not finalized and
// still subject to possible change or removal.
message QueryBackfillsRequest {
// The Pool representing the set of Filters to be queried.
Pool pool = 1;
}
// BETA FEATURE WARNING: This Request messages are not finalized and
// still subject to possible change or removal.
message QueryBackfillsResponse {
// Backfills that meet all the filtering criteria requested by the pool.
repeated Backfill backfills = 1;
}
// The QueryService service implements helper APIs for Match Function to query Tickets from state storage.
service QueryService {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `queryPageSize` and stream back responses.
// - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.
// - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
rpc QueryTickets(QueryTicketsRequest) returns (stream QueryTicketsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/tickets:query"
@ -105,21 +91,11 @@ service QueryService {
// QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
// - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
// QueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.
// - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.
// - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
rpc QueryTicketIds(QueryTicketIdsRequest) returns (stream QueryTicketIdsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/ticketids:query"
body: "*"
};
}
// QueryBackfills gets a list of Backfills.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc QueryBackfills(QueryBackfillsRequest) returns (stream QueryBackfillsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/backfills:query"
body: "*"
};
}
}
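
A hedged sketch of consuming the paged QueryTickets stream directly with the generated Go client; inside a match function you would normally let the pkg/matchfunction helpers do this. The service address matches the default in-cluster install referenced elsewhere in this repository.

```go
package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	conn, err := grpc.Dial("open-match-query.open-match.svc.cluster.local:50503", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	qs := pb.NewQueryServiceClient(conn)

	// A pool with no filters returns every ticket, delivered in pages of queryPageSize.
	stream, err := qs.QueryTickets(context.Background(), &pb.QueryTicketsRequest{
		Pool: &pb.Pool{Name: "everyone"},
	})
	if err != nil {
		log.Fatal(err)
	}
	var tickets []*pb.Ticket
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		tickets = append(tickets, resp.Tickets...)
	}
	log.Printf("fetched %d tickets", len(tickets))
}
```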

@ -13,11 +13,6 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "QueryService"
}
],
"schemes": [
"http",
"https"
@ -29,73 +24,15 @@
"application/json"
],
"paths": {
"/v1/queryservice/backfills:query": {
"post": {
"summary": "QueryBackfills gets a list of Backfills.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "QueryService_QueryBackfills",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryBackfillsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchQueryBackfillsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchQueryBackfillsRequest"
}
}
],
"tags": [
"QueryService"
]
}
},
"/v1/queryservice/ticketids:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryService_QueryTicketIds",
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"operationId": "QueryTicketIds",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
"$ref": "#/x-stream-definitions/openmatchQueryTicketIdsResponse"
}
},
"404": {
@ -104,12 +41,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -129,22 +60,13 @@
},
"/v1/queryservice/tickets:query": {
"post": {
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryService_QueryTickets",
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"operationId": "QueryTickets",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchQueryTicketsResponse"
"$ref": "#/x-stream-definitions/openmatchQueryTicketsResponse"
}
},
"404": {
@ -153,12 +75,6 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -178,17 +94,6 @@
}
},
"definitions": {
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -206,37 +111,6 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -253,10 +127,6 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -300,29 +170,6 @@
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchQueryBackfillsRequest": {
"type": "object",
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
}
},
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
},
"openmatchQueryBackfillsResponse": {
"type": "object",
"properties": {
"backfills": {
"type": "array",
"items": {
"$ref": "#/definitions/openmatchBackfill"
},
"description": "Backfills that meet all the filtering criteria requested by the pool."
}
},
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
},
"openmatchQueryTicketIdsRequest": {
"type": "object",
"properties": {
@ -460,27 +307,56 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"rpcStatus": {
"runtimeStreamError": {
"type": "object",
"properties": {
"code": {
"grpc_code": {
"type": "integer",
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
"type": "string"
},
"http_status": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
}
}
}
},
"x-stream-definitions": {
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
"title": "Stream result of openmatchQueryTicketIdsResponse"
},
"openmatchQueryTicketsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketsResponse"
}
},
"externalDocs": {

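Through the HTTP gateway, the streaming query endpoints above deliver chunked JSON where each chunk is wrapped in the result/error envelope described by x-stream-definitions. A rough Go sketch of reading those chunks; the localhost port is an assumption (the query service's HTTP proxy port in a port-forwarded setup), and the envelope field names come from the swagger above.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

// streamChunk mirrors the x-stream-definitions wrapper: each chunk carries
// either a result page or a runtime stream error.
type streamChunk struct {
	Result json.RawMessage `json:"result"`
	Error  json.RawMessage `json:"error"`
}

func main() {
	body := strings.NewReader(`{"pool": {"name": "everyone"}}`)
	resp, err := http.Post("http://localhost:51503/v1/queryservice/tickets:query", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The gateway writes one JSON object per page; decode until the stream ends.
	dec := json.NewDecoder(resp.Body)
	for {
		var chunk streamChunk
		if err := dec.Decode(&chunk); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		if len(chunk.Error) > 0 && string(chunk.Error) != "null" {
			log.Fatalf("stream error: %s", chunk.Error)
		}
		fmt.Printf("page: %s\n", chunk.Result)
	}
}
```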
@ -90,7 +90,7 @@ steps:
- id: 'Build: Assets'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'assets', '-j12']
args: ['make', 'assets', '-j12']
volumes:
- name: 'go-vol'
path: '/go'
@ -106,7 +106,7 @@ steps:
- id: 'Test: Services'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'GOPROXY=off', 'GOLANG_TEST_COUNT=10', 'test']
args: ['make', 'GOLANG_TEST_COUNT=10', 'test']
volumes:
- name: 'go-vol'
path: '/go'
@ -132,7 +132,7 @@ steps:
- id: 'Deploy: Deployment Configs'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'ci-deploy-artifacts']
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', 'ci-deploy-artifacts']
waitFor: ['Lint: Format, Vet, Charts', 'Test: Deploy Open Match']
volumes:
- name: 'go-vol'
@ -153,7 +153,7 @@ steps:
artifacts:
objects:
location: '${_ARTIFACTS_BUCKET}'
location: gs://open-match-build-artifacts/output/
paths:
- install/yaml/install.yaml
- install/yaml/01-open-match-core.yaml
@ -164,13 +164,10 @@ artifacts:
- install/yaml/06-open-match-override-configmap.yaml
substitutions:
_OM_VERSION: "1.3.0"
_OM_VERSION: "1.0.0"
_GCB_POST_SUBMIT: "0"
_GCB_LATEST_VERSION: "undefined"
_ARTIFACTS_BUCKET: "gs://open-match-build-artifacts/output/"
_LOGS_BUCKET: "gs://open-match-build-logs/"
_CHARTS_BUCKET: "gs://open-match-chart"
logsBucket: '${_LOGS_BUCKET}'
logsBucket: 'gs://open-match-build-logs/'
options:
sourceProvenanceHash: ['SHA256']
machineType: 'N1_HIGHCPU_32'

@ -1,10 +1,10 @@
{
"urls": [
{"name": "Frontend", "url": "https://open-match.dev/api/v0.0.0-dev/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v0.0.0-dev/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v0.0.0-dev/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v0.0.0-dev/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v0.0.0-dev/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v0.0.0-dev/evaluator.swagger.json"}
{"name": "Frontend", "url": "https://open-match.dev/api/v1.0.0/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v1.0.0/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v1.0.0/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v1.0.0/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v1.0.0/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v1.0.0/evaluator.swagger.json"}
]
}

@ -46,7 +46,7 @@ make
*Typically for contributing you'll want to
[create a fork](https://help.github.com/en/articles/fork-a-repo) and use that
but for purpose of this guide we'll be using the upstream/main.*
but for purpose of this guide we'll be using the upstream/master.*
## Building code and images
@ -111,8 +111,8 @@ While iterating on the project, you may need to:
## Accessing logs
To look at Open Match core services' logs, run:
```bash
# Replace open-match-frontend with the service name that you would like to access
kubectl logs -n open-match svc/open-match-frontend
# Replace om-frontend with the service name that you would like to access
kubectl logs -n open-match svc/om-frontend
```
## API References

@ -12,13 +12,24 @@ SOURCE_VERSION=$1
DEST_VERSION=$2
SOURCE_PROJECT_ID=open-match-build
DEST_PROJECT_ID=open-match-public-images
IMAGE_NAMES=$(make list-images)
IMAGE_NAMES="openmatch-backend openmatch-frontend openmatch-query openmatch-synchronizer openmatch-minimatch openmatch-demo-first-match openmatch-mmf-go-soloduel openmatch-mmf-go-pool openmatch-evaluator-go-simple openmatch-swaggerui openmatch-reaper"
for name in $IMAGE_NAMES
do
source_image=gcr.io/$SOURCE_PROJECT_ID/openmatch-$name:$SOURCE_VERSION
dest_image=gcr.io/$DEST_PROJECT_ID/openmatch-$name:$DEST_VERSION
source_image=gcr.io/$SOURCE_PROJECT_ID/$name:$SOURCE_VERSION
dest_image=gcr.io/$DEST_PROJECT_ID/$name:$DEST_VERSION
docker pull $source_image
docker tag $source_image $dest_image
docker push $dest_image
done
echo "=============================================================="
echo "=============================================================="
echo "=============================================================="
echo "=============================================================="
echo "Add these lines to your release notes:"
for name in $IMAGE_NAMES
do
echo "docker pull gcr.io/$DEST_PROJECT_ID/$name:$DEST_VERSION"
done

@ -1,7 +0,0 @@
---
title: "Open Match API References"
linkTitle: "Open Match API References"
weight: 2
description:
This document provides API references for Open Match services.
---

@ -37,7 +37,7 @@ func New() *ByteSub {
}
}
// AnnounceLatest writes b to all of the subscribers, with caveats listed in Subscribe.
// AnnounceLatest writes b to all of the subscribers, with caviets listed in Subscribe.
func (s *ByteSub) AnnounceLatest(b []byte) {
s.r.Lock()
defer s.r.Unlock()

@ -51,7 +51,7 @@ func TestFastAndSlow(t *testing.T) {
for count := 0; true; count++ {
if v := <-slow; v == "3" {
if count > 1 {
t.Error("Expected to receive at most 1 other value on slow before receiving the latest value.")
t.Error("Expected to recieve at most 1 other value on slow before recieving the latest value.")
}
break
}

@ -81,7 +81,7 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
update(s)
// See https://open-match.dev/site/docs/guides/api/
conn, err := grpc.Dial("open-match-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
conn, err := grpc.Dial("om-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
if err != nil {
panic(err)
}

@ -68,7 +68,7 @@ func run(ds *components.DemoShared) {
ds.Update(s)
// See https://open-match.dev/site/docs/guides/api/
conn, err := grpc.Dial("open-match-backend.open-match.svc.cluster.local:50505", grpc.WithInsecure())
conn, err := grpc.Dial("om-backend.open-match.svc.cluster.local:50505", grpc.WithInsecure())
if err != nil {
panic(err)
}

@ -37,7 +37,7 @@ type Updater struct {
type SetFunc func(v interface{})
// New creates an Updater. Set is called when fields update, using the json
// serialized value of Updater's tree. All updates after ctx is canceled are
// sererialized value of Updater's tree. All updates after ctx is canceled are
// ignored.
func New(ctx context.Context, set func([]byte)) *Updater {
f := func(v interface{}) {

@ -1,24 +0,0 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM open-match-base-build as builder
WORKDIR /go/src/open-match.dev/open-match/examples/functions/golang/backfill
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o matchfunction .
FROM gcr.io/distroless/static:nonroot
WORKDIR /app/
COPY --from=builder --chown=nonroot /go/src/open-match.dev/open-match/examples/functions/golang/backfill/matchfunction /app/
ENTRYPOINT ["/app/matchfunction"]

@ -1,33 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main defines a sample match function that uses the GRPC harness to set up
// the match making function as a service. This sample is a reference
// to demonstrate the usage of the GRPC harness and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package main
import (
"open-match.dev/open-match/examples/functions/golang/backfill/mmf"
)
const (
queryServiceAddr = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
)
func main() {
mmf.Start(queryServiceAddr, serverPort)
}

@ -1,297 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mmf provides a sample match function that uses the GRPC harness to set up 1v1 matches.
// This sample is a reference to demonstrate the usage of backfill and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package mmf
import (
"fmt"
"time"
"log"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/grpc"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
const (
playersPerMatch = 2
openSlotsKey = "open-slots"
matchName = "backfill-matchfunction"
)
// matchFunctionService implements pb.MatchFunctionServer, the server generated
// by compiling the protobuf, by fulfilling the pb.MatchFunctionServer interface.
type matchFunctionService struct {
grpc *grpc.Server
queryServiceClient pb.QueryServiceClient
port int
}
func (s *matchFunctionService) Run(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
log.Printf("Generating proposals for function %v", req.GetProfile().GetName())
var proposals []*pb.Match
profile := req.GetProfile()
pools := profile.GetPools()
for _, p := range pools {
tickets, err := matchfunction.QueryPool(stream.Context(), s.queryServiceClient, p)
if err != nil {
log.Printf("Failed to query tickets for the given pool, got %s", err.Error())
return err
}
backfills, err := matchfunction.QueryBackfillPool(stream.Context(), s.queryServiceClient, p)
if err != nil {
log.Printf("Failed to query backfills for the given pool, got %s", err.Error())
return err
}
matches, err := makeMatches(profile, p, tickets, backfills)
if err != nil {
log.Printf("Failed to generate matches, got %s", err.Error())
return err
}
proposals = append(proposals, matches...)
}
log.Printf("Streaming %v proposals to Open Match", len(proposals))
// Stream the generated proposals back to Open Match.
for _, proposal := range proposals {
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
log.Printf("Failed to stream proposals to Open Match, got %s", err.Error())
return err
}
}
return nil
}
// makeMatches tries to handle backfills at first, then it makes full matches, at the end it makes a match with backfill
// if tickets left
func makeMatches(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, backfills []*pb.Backfill) ([]*pb.Match, error) {
var matches []*pb.Match
newMatches, remainingTickets, err := handleBackfills(profile, tickets, backfills, len(matches))
if err != nil {
return nil, err
}
matches = append(matches, newMatches...)
newMatches, remainingTickets = makeFullMatches(profile, remainingTickets, len(matches))
matches = append(matches, newMatches...)
if len(remainingTickets) > 0 {
match, err := makeMatchWithBackfill(profile, pool, remainingTickets, len(matches))
if err != nil {
return nil, err
}
matches = append(matches, match)
}
return matches, nil
}
// handleBackfills looks at each backfill's openSlots which is a number of required tickets,
// acquires that tickets, decreases openSlots in backfill and makes a match with updated backfill and associated tickets.
func handleBackfills(profile *pb.MatchProfile, tickets []*pb.Ticket, backfills []*pb.Backfill, lastMatchId int) ([]*pb.Match, []*pb.Ticket, error) {
matchId := lastMatchId
var matches []*pb.Match
for _, b := range backfills {
openSlots, err := getOpenSlots(b)
if err != nil {
return nil, tickets, err
}
var matchTickets []*pb.Ticket
for openSlots > 0 && len(tickets) > 0 {
matchTickets = append(matchTickets, tickets[0])
tickets = tickets[1:]
openSlots--
}
if len(matchTickets) > 0 {
err := setOpenSlots(b, openSlots)
if err != nil {
return nil, tickets, err
}
matchId++
match := newMatch(matchId, profile.Name, matchTickets, b)
matches = append(matches, &match)
}
}
return matches, tickets, nil
}
// makeMatchWithBackfill makes not full match, creates backfill for it with openSlots = playersPerMatch-len(tickets).
func makeMatchWithBackfill(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, lastMatchId int) (*pb.Match, error) {
if len(tickets) == 0 {
return nil, fmt.Errorf("tickets are required")
}
if len(tickets) >= playersPerMatch {
return nil, fmt.Errorf("too many tickets")
}
matchId := lastMatchId
searchFields := newSearchFields(pool)
backfill, err := newBackfill(searchFields, playersPerMatch-len(tickets))
if err != nil {
return nil, err
}
matchId++
match := newMatch(matchId, profile.Name, tickets, backfill)
// indicates that it is a new match and new game server should be allocated for it
match.AllocateGameserver = true
return &match, nil
}
// makeFullMatches makes matches without backfill
func makeFullMatches(profile *pb.MatchProfile, tickets []*pb.Ticket, lastMatchId int) ([]*pb.Match, []*pb.Ticket) {
ticketNum := 0
matchId := lastMatchId
var matches []*pb.Match
for ticketNum < playersPerMatch && len(tickets) >= playersPerMatch {
ticketNum++
if ticketNum == playersPerMatch {
matchId++
match := newMatch(matchId, profile.Name, tickets[:playersPerMatch], nil)
matches = append(matches, &match)
tickets = tickets[playersPerMatch:]
ticketNum = 0
}
}
return matches, tickets
}
// newSearchFields creates search fields based on pool's search criteria. This is just example of how it can be done.
func newSearchFields(pool *pb.Pool) *pb.SearchFields {
searchFields := pb.SearchFields{}
rangeFilters := pool.GetDoubleRangeFilters()
if rangeFilters != nil {
doubleArgs := make(map[string]float64)
for _, f := range rangeFilters {
doubleArgs[f.DoubleArg] = (f.Max - f.Min) / 2
}
if len(doubleArgs) > 0 {
searchFields.DoubleArgs = doubleArgs
}
}
stringFilters := pool.GetStringEqualsFilters()
if stringFilters != nil {
stringArgs := make(map[string]string)
for _, f := range stringFilters {
stringArgs[f.StringArg] = f.Value
}
if len(stringArgs) > 0 {
searchFields.StringArgs = stringArgs
}
}
tagFilters := pool.GetTagPresentFilters()
if tagFilters != nil {
tags := make([]string, 0, len(tagFilters))
for _, f := range tagFilters {
tags = append(tags, f.Tag)
}
if len(tags) > 0 {
searchFields.Tags = tags
}
}
return &searchFields
}
func newBackfill(searchFields *pb.SearchFields, openSlots int) (*pb.Backfill, error) {
b := pb.Backfill{
SearchFields: searchFields,
Generation: 0,
CreateTime: ptypes.TimestampNow(),
}
err := setOpenSlots(&b, int32(openSlots))
return &b, err
}
func newMatch(num int, profile string, tickets []*pb.Ticket, b *pb.Backfill) pb.Match {
t := time.Now().Format("2006-01-02T15:04:05.00")
return pb.Match{
MatchId: fmt.Sprintf("profile-%s-time-%s-num-%d", profile, t, num),
MatchProfile: profile,
MatchFunction: matchName,
Tickets: tickets,
Backfill: b,
}
}
func setOpenSlots(b *pb.Backfill, val int32) error {
if b.Extensions == nil {
b.Extensions = make(map[string]*any.Any)
}
any, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: val})
if err != nil {
return err
}
b.Extensions[openSlotsKey] = any
return nil
}
func getOpenSlots(b *pb.Backfill) (int32, error) {
if b == nil {
return 0, fmt.Errorf("expected backfill not to be nil")
}
if b.Extensions != nil {
if any, ok := b.Extensions[openSlotsKey]; ok {
var val wrappers.Int32Value
err := ptypes.UnmarshalAny(any, &val)
if err != nil {
return 0, err
}
return val.Value, nil
}
}
return playersPerMatch, nil
}
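As a side note on the open-slots bookkeeping used by handleBackfills, setOpenSlots, and getOpenSlots above, the following standalone sketch (not part of the diff; the openSlotsKey string is an assumption mirroring the constant defined elsewhere in this package) shows how an int32 value round-trips through a Backfill's Extensions map with the same ptypes/wrappers helpers:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/any"
	"github.com/golang/protobuf/ptypes/wrappers"

	"open-match.dev/open-match/pkg/pb"
)

// openSlotsKey is assumed to match the constant used by the MMF above.
const openSlotsKey = "open-slots"

func main() {
	// Store three open slots on a backfill, as setOpenSlots does.
	b := &pb.Backfill{Extensions: map[string]*any.Any{}}
	slots, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: 3})
	if err != nil {
		panic(err)
	}
	b.Extensions[openSlotsKey] = slots

	// Read the value back, as getOpenSlots does.
	var val wrappers.Int32Value
	if err := ptypes.UnmarshalAny(b.Extensions[openSlotsKey], &val); err != nil {
		panic(err)
	}
	fmt.Println(val.Value) // prints 3
}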

@ -1,142 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mmf
import (
"testing"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/stretchr/testify/require"
"open-match.dev/open-match/pkg/pb"
)
func TestHandleBackfills(t *testing.T) {
for _, tc := range []struct {
name string
tickets []*pb.Ticket
backfills []*pb.Backfill
lastMatchId int
expectedMatchLen int
expectedTicketLen int
expectedOpenSlots int32
expectedErr bool
}{
{name: "returns no matches when no backfills specified", expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns no matches when no tickets specified", expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns a match with open slots decreased", tickets: []*pb.Ticket{{Id: "1"}}, backfills: []*pb.Backfill{withOpenSlots(1)}, expectedMatchLen: 1, expectedTicketLen: 0, expectedOpenSlots: playersPerMatch - 2},
} {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
profile := pb.MatchProfile{Name: "matchProfile"}
matches, tickets, err := handleBackfills(&profile, testCase.tickets, testCase.backfills, testCase.lastMatchId)
require.Equal(t, testCase.expectedErr, err != nil)
require.Equal(t, testCase.expectedTicketLen, len(tickets))
if err != nil {
require.Equal(t, 0, len(matches))
} else {
for _, m := range matches {
require.NotNil(t, m.Backfill)
openSlots, err := getOpenSlots(m.Backfill)
require.NoError(t, err)
require.Equal(t, testCase.expectedOpenSlots, openSlots)
}
}
})
}
}
func TestMakeMatchWithBackfill(t *testing.T) {
for _, testCase := range []struct {
name string
tickets []*pb.Ticket
lastMatchId int
expectedOpenSlots int32
expectedErr bool
}{
{name: "returns an error when length of tickets is greater then playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}, {Id: "5"}}, expectedErr: true},
{name: "returns an error when length of tickets is equal to playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}}, expectedErr: true},
{name: "returns an error when no tickets are provided", expectedErr: true},
{name: "returns a match with backfill", tickets: []*pb.Ticket{{Id: "1"}}, expectedOpenSlots: playersPerMatch - 1},
} {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
pool := pb.Pool{}
profile := pb.MatchProfile{Name: "matchProfile"}
match, err := makeMatchWithBackfill(&profile, &pool, testCase.tickets, testCase.lastMatchId)
require.Equal(t, testCase.expectedErr, err != nil)
if err == nil {
require.NotNil(t, match)
require.NotNil(t, match.Backfill)
require.True(t, match.AllocateGameserver)
require.Equal(t, "", match.Backfill.Id)
openSlots, err := getOpenSlots(match.Backfill)
require.Nil(t, err)
require.Equal(t, testCase.expectedOpenSlots, openSlots)
}
})
}
}
func TestMakeFullMatches(t *testing.T) {
for _, testCase := range []struct {
name string
tickets []*pb.Ticket
lastMatchId int
expectedMatchLen int
expectedTicketLen int
}{
{name: "returns no matches when there are no tickets", tickets: []*pb.Ticket{}, expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns no matches when length of tickets is less then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}}, expectedMatchLen: 0, expectedTicketLen: 1},
{name: "returns a match when length of tickets is greater then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}}, expectedMatchLen: 1, expectedTicketLen: 0},
} {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
profile := pb.MatchProfile{Name: "matchProfile"}
matches, tickets := makeFullMatches(&profile, testCase.tickets, testCase.lastMatchId)
require.Equal(t, testCase.expectedMatchLen, len(matches))
require.Equal(t, testCase.expectedTicketLen, len(tickets))
for _, m := range matches {
require.Nil(t, m.Backfill)
require.Equal(t, playersPerMatch, len(m.Tickets))
}
})
}
}
func withOpenSlots(openSlots int) *pb.Backfill {
val, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: int32(openSlots)})
if err != nil {
panic(err)
}
return &pb.Backfill{
Extensions: map[string]*any.Any{
openSlotsKey: val,
},
}
}

@ -1,59 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mmf provides a sample match function that uses the GRPC harness to set up 1v1 matches.
// This sample is a reference to demonstrate the usage of backfill and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package mmf
import (
"fmt"
"log"
"net"
"google.golang.org/grpc"
"open-match.dev/open-match/pkg/pb"
)
func Start(queryServiceAddr string, serverPort int) {
// Connect to QueryService.
conn, err := grpc.Dial(queryServiceAddr, grpc.WithInsecure())
if err != nil {
log.Fatalf("Failed to connect to Open Match, got %s", err.Error())
}
defer conn.Close()
mmfService := matchFunctionService{
queryServiceClient: pb.NewQueryServiceClient(conn),
}
// Create and host a new gRPC service on the configured port.
server := grpc.NewServer()
pb.RegisterMatchFunctionServer(server, &mmfService)
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", serverPort))
if err != nil {
log.Fatalf("TCP net listener initialization failed for port %v, got %s", serverPort, err.Error())
}
log.Printf("TCP net listener initialized for port %v", serverPort)
err = server.Serve(ln)
if err != nil {
log.Fatalf("gRPC serve failed, got %s", err.Error())
}
}
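A minimal sketch of a main package wiring up Start follows; the import path is hypothetical (point it at wherever this mmf package actually lives), and the address and port mirror the queryServiceAddr/serverPort constants shown in the next hunk:

package main

// NOTE: the import path is hypothetical; replace it with the real location of this mmf package.
import "example.com/mygame/matchfunction/mmf"

func main() {
	// Mirrors the queryServiceAddr and serverPort constants used by the tutorial's main package.
	mmf.Start("open-match-query.open-match.svc.cluster.local:50503", 50502)
}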

@ -24,8 +24,8 @@ import (
)
const (
queryServiceAddr = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
queryServiceAddr = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
)
func main() {

@ -19,11 +19,11 @@ import (
"open-match.dev/open-match/pkg/pb"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
)
func TestMakeMatchesDeduplicate(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
poolNameToTickets := map[string][]*pb.Ticket{
"pool1": {{Id: "1"}},
@ -31,12 +31,12 @@ func TestMakeMatchesDeduplicate(t *testing.T) {
}
matches, err := makeMatches(poolNameToTickets)
require.Nil(err)
require.Equal(len(matches), 0)
assert.Nil(err)
assert.Equal(len(matches), 0)
}
func TestMakeMatches(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
poolNameToTickets := map[string][]*pb.Ticket{
"pool1": {{Id: "1"}, {Id: "2"}, {Id: "3"}},
@ -45,11 +45,11 @@ func TestMakeMatches(t *testing.T) {
}
matches, err := makeMatches(poolNameToTickets)
require.Nil(err)
require.Equal(len(matches), 3)
assert.Nil(err)
assert.Equal(len(matches), 3)
for _, match := range matches {
require.Equal(2, len(match.Tickets))
require.Equal(matchName, match.MatchFunction)
assert.Equal(2, len(match.Tickets))
assert.Equal(matchName, match.MatchFunction)
}
}

@ -40,16 +40,16 @@ var (
activeScenario = scenarios.ActiveScenario
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
mFetchMatchSuccesses = telemetry.Counter("scale_backend_fetch_match_successes", "fetch match successes")
mFetchMatchErrors = telemetry.Counter("scale_backend_fetch_match_errors", "fetch match errors")
mMatchesReturned = telemetry.Counter("scale_backend_matches_returned", "matches returned")
mSumTicketsReturned = telemetry.Counter("scale_backend_sum_tickets_returned", "tickets in matches returned")
mMatchesAssigned = telemetry.Counter("scale_backend_matches_assigned", "matches assigned")
mMatchAssignsFailed = telemetry.Counter("scale_backend_match_assigns_failed", "match assigns failed")
mBackfillsDeleted = telemetry.Counter("scale_backend_backfills_deleted", "backfills deleted")
mBackfillDeletesFailed = telemetry.Counter("scale_backend_backfill_deletes_failed", "backfill deletes failed")
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
mFetchMatchSuccesses = telemetry.Counter("scale_backend_fetch_match_successes", "fetch match successes")
mFetchMatchErrors = telemetry.Counter("scale_backend_fetch_match_errors", "fetch match errors")
mMatchesReturned = telemetry.Counter("scale_backend_matches_returned", "matches returned")
mSumTicketsReturned = telemetry.Counter("scale_backend_sum_tickets_returned", "tickets in matches returned")
mMatchesAssigned = telemetry.Counter("scale_backend_matches_assigned", "matches assigned")
mMatchAssignsFailed = telemetry.Counter("scale_backend_match_assigns_failed", "match assigns failed")
mTicketsDeleted = telemetry.Counter("scale_backend_tickets_deleted", "tickets deleted")
mTicketDeletesFailed = telemetry.Counter("scale_backend_ticket_deletes_failed", "ticket deletes failed")
)
// Run triggers execution of functions that continuously fetch, assign and
@ -79,28 +79,12 @@ func run(cfg config.View) {
w := logger.Writer()
defer w.Close()
matchesToAssign := make(chan *pb.Match, 30000)
matchesForAssignment := make(chan *pb.Match, 30000)
ticketsForDeletion := make(chan string, 30000)
if activeScenario.BackendAssignsTickets {
for i := 0; i < 100; i++ {
go runAssignments(be, matchesToAssign)
}
}
backfillsToDelete := make(chan *pb.Backfill, 30000)
if activeScenario.BackendDeletesBackfills {
for i := 0; i < 100; i++ {
go runDeleteBackfills(fe, backfillsToDelete)
}
}
matchesToAcknowledge := make(chan *pb.Match, 30000)
if activeScenario.BackendAcknowledgesBackfills {
for i := 0; i < 100; i++ {
go runAcknowledgeBackfills(fe, matchesToAcknowledge, backfillsToDelete)
}
for i := 0; i < 50; i++ {
go runAssignments(be, matchesForAssignment, ticketsForDeletion)
go runDeletions(fe, ticketsForDeletion)
}
// Don't go faster than this, as it likely means that FetchMatches is throwing
@ -114,7 +98,7 @@ func run(cfg config.View) {
wg.Add(1)
go func(wg *sync.WaitGroup, p *pb.MatchProfile) {
defer wg.Done()
runFetchMatches(be, p, matchesToAssign, matchesToAcknowledge)
runFetchMatches(be, p, matchesForAssignment)
}(&wg, p)
}
@ -124,13 +108,13 @@ func run(cfg config.View) {
}
}
func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesToAssign chan<- *pb.Match, matchesToAcknowledge chan<- *pb.Match) {
func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForAssignment chan<- *pb.Match) {
ctx, span := trace.StartSpan(context.Background(), "scale.backend/FetchMatches")
defer span.End()
req := &pb.FetchMatchesRequest{
Config: &pb.FunctionConfig{
Host: "open-match-function",
Host: "om-function",
Port: 50502,
Type: pb.FunctionConfig_GRPC,
},
@ -162,90 +146,62 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesToAs
telemetry.RecordNUnitMeasurement(ctx, mSumTicketsReturned, int64(len(resp.GetMatch().Tickets)))
telemetry.RecordUnitMeasurement(ctx, mMatchesReturned)
if activeScenario.BackendAssignsTickets {
matchesToAssign <- resp.GetMatch()
}
if activeScenario.BackendAcknowledgesBackfills {
matchesToAcknowledge <- resp.GetMatch()
}
matchesForAssignment <- resp.GetMatch()
}
}
func runDeleteBackfills(fe pb.FrontendServiceClient, backfillsToDelete <-chan *pb.Backfill) {
for b := range backfillsToDelete {
if !activeScenario.BackfillDeleteCond(b) {
continue
}
ctx := context.Background()
_, err := fe.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: b.Id})
if err != nil {
logger.WithError(err).Errorf("failed to delete backfill: %s", b.Id)
telemetry.RecordUnitMeasurement(ctx, mBackfillDeletesFailed)
} else {
telemetry.RecordUnitMeasurement(ctx, mBackfillsDeleted)
}
}
}
func runAcknowledgeBackfills(fe pb.FrontendServiceClient, matchesToAcknowledge <-chan *pb.Match, backfillsToDelete chan<- *pb.Backfill) {
for m := range matchesToAcknowledge {
backfillId := m.Backfill.GetId()
if backfillId == "" {
continue
}
err := acknowledgeBackfill(fe, backfillId)
if err != nil {
logger.WithError(err).Errorf("failed to acknowledge backfill: %s", backfillId)
continue
}
if activeScenario.BackendDeletesBackfills {
backfillsToDelete <- m.Backfill
}
}
}
func acknowledgeBackfill(fe pb.FrontendServiceClient, backfillId string) error {
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/AcknowledgeBackfill")
defer span.End()
_, err := fe.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{
BackfillId: backfillId,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
})
return err
}
func runAssignments(be pb.BackendServiceClient, matchesToAssign <-chan *pb.Match) {
func runAssignments(be pb.BackendServiceClient, matchesForAssignment <-chan *pb.Match, ticketsForDeletion chan<- string) {
ctx := context.Background()
for m := range matchesToAssign {
for m := range matchesForAssignment {
ids := []string{}
for _, t := range m.Tickets {
ids = append(ids, t.GetId())
}
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
if activeScenario.BackendAssignsTickets {
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
},
},
},
})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
logger.WithError(err).Error("failed to assign tickets")
continue
})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
logger.WithError(err).Error("failed to assign tickets")
continue
}
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
}
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
for _, id := range ids {
ticketsForDeletion <- id
}
}
}
func runDeletions(fe pb.FrontendServiceClient, ticketsForDeletion <-chan string) {
ctx := context.Background()
for id := range ticketsForDeletion {
if activeScenario.BackendDeletesTickets {
req := &pb.DeleteTicketRequest{
TicketId: id,
}
_, err := fe.DeleteTicket(context.Background(), req)
if err == nil {
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
} else {
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
logger.WithError(err).Error("failed to delete tickets")
}
}
}
}

@ -38,22 +38,12 @@ var (
})
activeScenario = scenarios.ActiveScenario
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "ticket creations failed")
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
mTicketsDeleted = telemetry.Counter("scale_frontend_tickets_deleted", "tickets deleted")
mTicketDeletesFailed = telemetry.Counter("scale_frontend_ticket_deletes_failed", "ticket deletes failed")
mBackfillsCreated = telemetry.Counter("scale_frontend_backfills_created", "backfills_created")
mBackfillCreationsFailed = telemetry.Counter("scale_frontend_backfill_creations_failed", "backfill creations failed")
mTicketsTimeToAssignment = telemetry.HistogramWithBounds("scale_frontend_tickets_time_to_assignment", "tickets time to assignment", stats.UnitMilliseconds, []float64{0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000})
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "ticket creations failed")
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
)
type ticketToWatch struct {
id string
createdAt time.Time
}
// Run triggers execution of the scale frontend component that creates
// tickets at scale in Open Match.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
@ -71,12 +61,9 @@ func run(cfg config.View) {
}
fe := pb.NewFrontendServiceClient(conn)
if activeScenario.FrontendCreatesBackfillsOnStart {
createBackfills(fe, activeScenario.FrontendTotalBackfillsToCreate)
}
ticketQPS := int(activeScenario.FrontendTicketCreatedQPS)
ticketTotal := activeScenario.FrontendTotalTicketsToCreate
totalCreated := 0
for range time.Tick(time.Second) {
@ -102,27 +89,13 @@ func runner(fe pb.FrontendServiceClient) {
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
g.start(mRunnersCreating)
createdAt := time.Now()
id, err := createTicket(ctx, fe)
if err != nil {
logger.WithError(err).Error("failed to create a ticket")
return
}
err = watchAssignments(ctx, fe, ticketToWatch{id: id, createdAt: createdAt})
if err != nil {
logger.WithError(err).Errorf("failed to get ticket assignment: %s", id)
} else {
ms := time.Since(createdAt).Nanoseconds() / 1e6
stats.Record(ctx, mTicketsTimeToAssignment.M(ms))
}
if activeScenario.FrontendDeletesTickets {
err = deleteTicket(ctx, fe, id)
if err != nil {
logger.WithError(err).Errorf("failed to delete ticket: %s", id)
}
}
_ = id
}
func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, error) {
@ -143,68 +116,6 @@ func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, err
return resp.Id, nil
}
func watchAssignments(ctx context.Context, fe pb.FrontendServiceClient, ticket ticketToWatch) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := fe.WatchAssignments(ctx, &pb.WatchAssignmentsRequest{TicketId: ticket.id})
if err != nil {
return err
}
var a *pb.Assignment
for a.GetConnection() == "" {
resp, err := stream.Recv()
if err != nil {
return err
}
a = resp.Assignment
}
return nil
}
func createBackfills(fe pb.FrontendServiceClient, numBackfillsToCreate int) error {
for i := 0; i < numBackfillsToCreate; i++ {
err := createBackfill(fe)
if err != nil {
return err
}
}
return nil
}
func createBackfill(fe pb.FrontendServiceClient) error {
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/CreateBackfill")
defer span.End()
req := pb.CreateBackfillRequest{
Backfill: activeScenario.Backfill(),
}
_, err := fe.CreateBackfill(ctx, &req)
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mBackfillCreationsFailed)
logger.WithError(err).Error("failed to create backfill")
return err
}
telemetry.RecordUnitMeasurement(ctx, mBackfillsCreated)
return nil
}
func deleteTicket(ctx context.Context, fe pb.FrontendServiceClient, ticketId string) error {
_, err := fe.DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticketId})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
} else {
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
}
return err
}
// Allows concurrent modification of a gauge value by modifying the concurrent
// value with a delta.
func concurrentGauge(s *stats.Int64Measure) func(delta int64) {

@ -39,7 +39,7 @@ var (
func Run() {
activeScenario := scenarios.ActiveScenario
conn, err := grpc.Dial("open-match-query.open-match.svc.cluster.local:50503", utilTesting.NewGRPCDialOptions(logger)...)
conn, err := grpc.Dial("om-query.open-match.svc.cluster.local:50503", utilTesting.NewGRPCDialOptions(logger)...)
if err != nil {
logger.Fatalf("Failed to connect to Open Match, got %v", err)
}

@ -1,271 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package backfill
import (
"fmt"
"io"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"open-match.dev/open-match/pkg/pb"
)
const (
poolName = "all"
openSlotsKey = "open-slots"
)
func Scenario() *BackfillScenario {
ticketsPerMatch := 4
return &BackfillScenario{
TicketsPerMatch: ticketsPerMatch,
MaxTicketsPerNotFullMatch: 3,
BackfillDeleteCond: func(b *pb.Backfill) bool {
openSlots := getOpenSlots(b, ticketsPerMatch)
return openSlots <= 0
},
}
}
type BackfillScenario struct {
TicketsPerMatch int
MaxTicketsPerNotFullMatch int
BackfillDeleteCond func(*pb.Backfill) bool
}
func (s *BackfillScenario) Profiles() []*pb.MatchProfile {
return []*pb.MatchProfile{
{
Name: "entirePool",
Pools: []*pb.Pool{
{
Name: poolName,
},
},
},
}
}
func (s *BackfillScenario) Ticket() *pb.Ticket {
return &pb.Ticket{}
}
func (s *BackfillScenario) Backfill() *pb.Backfill {
return &pb.Backfill{}
}
func (s *BackfillScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
return statefullMMF(p, poolBackfills, poolTickets, s.TicketsPerMatch, s.MaxTicketsPerNotFullMatch)
}
// statefullMMF is an MMF implementation used in the scenario where we want the MMF to create a not-full match and fill it later.
// 1. The first FetchMatches is called
// 2. MMF grabs maxTicketsPerNotFullMatch tickets and makes a match and new backfill for it
// 3. MMF sets backfill's open slots to ticketsPerMatch - maxTicketsPerNotFullMatch
// 4. MMF returns the match as a result
// 5. The second FetchMatches is called
// 6. MMF gets previously created backfill
// 7. MMF gets backfill's open slots value
// 8. MMF grabs openSlots tickets and makes a match with previously created backfill
// 9. MMF sets backfill's open slots to 0
// 10. MMF returns the match as a result
func statefullMMF(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket, ticketsPerMatch int, maxTicketsPerNotFullMatch int) ([]*pb.Match, error) {
var matches []*pb.Match
for pool, backfills := range poolBackfills {
tickets, ok := poolTickets[pool]
if !ok || len(tickets) == 0 {
// no tickets in pool
continue
}
// process backfills first
for _, b := range backfills {
l := len(tickets)
if l == 0 {
// no tickets left
break
}
openSlots := getOpenSlots(b, ticketsPerMatch)
if openSlots <= 0 {
// no free open slots
continue
}
if l > openSlots {
l = openSlots
}
setOpenSlots(b, openSlots-l)
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[0:l],
MatchProfile: p.GetName(),
MatchFunction: "backfill",
Backfill: b,
})
tickets = tickets[l:]
}
// create not full matches with backfill
for {
l := len(tickets)
if l == 0 {
// no tickets left
break
}
if l > maxTicketsPerNotFullMatch {
l = maxTicketsPerNotFullMatch
}
b := pb.Backfill{}
setOpenSlots(&b, ticketsPerMatch-l)
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[0:l],
MatchProfile: p.GetName(),
MatchFunction: "backfill",
Backfill: &b,
AllocateGameserver: true,
})
tickets = tickets[l:]
}
}
return matches, nil
}
func getOpenSlots(b *pb.Backfill, defaultVal int) int {
if b.Extensions == nil {
return defaultVal
}
any, ok := b.Extensions[openSlotsKey]
if !ok {
return defaultVal
}
var val wrappers.Int32Value
err := ptypes.UnmarshalAny(any, &val)
if err != nil {
panic(err)
}
return int(val.Value)
}
func setOpenSlots(b *pb.Backfill, val int) {
if b.Extensions == nil {
b.Extensions = make(map[string]*any.Any)
}
any, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: int32(val)})
if err != nil {
panic(err)
}
b.Extensions[openSlotsKey] = any
}
// statelessMMF is an MMF implementation used in the scenario where we want the MMF to fill backfills created by a game server. It doesn't create
// or update any backfill.
// 1. FetchMatches is called
// 2. MMF gets a backfill
// 3. MMF grabs ticketsPerMatch tickets and makes a match with the backfill
// 4. MMF returns the match as a result
func statelessMMF(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket, ticketsPerMatch int) ([]*pb.Match, error) {
var matches []*pb.Match
for pool, backfills := range poolBackfills {
tickets, ok := poolTickets[pool]
if !ok || len(tickets) == 0 {
// no tickets in pool
continue
}
for _, b := range backfills {
l := len(tickets)
if l == 0 {
// no tickets left
break
}
if l > ticketsPerMatch && ticketsPerMatch > 0 {
l = ticketsPerMatch
}
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[0:l],
MatchProfile: p.GetName(),
MatchFunction: "backfill",
Backfill: b,
})
tickets = tickets[l:]
}
}
return matches, nil
}
func (s *BackfillScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
tickets := map[string]struct{}{}
backfills := map[string]struct{}{}
matchIds := []string{}
outer:
for {
req, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read evaluator input stream: %w", err)
}
m := req.GetMatch()
if _, ok := backfills[m.Backfill.Id]; ok {
continue outer
}
for _, t := range m.Tickets {
if _, ok := tickets[t.Id]; ok {
continue outer
}
}
for _, t := range m.Tickets {
tickets[t.Id] = struct{}{}
}
matchIds = append(matchIds, m.GetMatchId())
}
for _, id := range matchIds {
err := stream.Send(&pb.EvaluateResponse{MatchId: id})
if err != nil {
return fmt.Errorf("failed to sending evaluator output stream: %w", err)
}
}
return nil
}
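To make the two-pass flow described in the statefullMMF comment above concrete, here is a minimal in-package sketch (an assumption: it lives alongside statefullMMF in package backfill and is illustrative only, not part of the diff) that runs a profile through both FetchMatches passes with ticketsPerMatch=4 and maxTicketsPerNotFullMatch=3:

// exampleStatefullFlow is a hypothetical helper illustrating the numbered steps above.
func exampleStatefullFlow(p *pb.MatchProfile) {
	// Pass 1: no backfills exist yet; three tickets arrive.
	pass1, _ := statefullMMF(p,
		map[string][]*pb.Backfill{poolName: {}},
		map[string][]*pb.Ticket{poolName: {{Id: "1"}, {Id: "2"}, {Id: "3"}}},
		4, 3)
	// pass1 holds one not-full match whose newly created backfill has one open slot left.
	b := pass1[0].Backfill

	// Pass 2: the stored backfill comes back from the query service along with one new ticket.
	pass2, _ := statefullMMF(p,
		map[string][]*pb.Backfill{poolName: {b}},
		map[string][]*pb.Ticket{poolName: {{Id: "4"}}},
		4, 3)
	_ = pass2 // one match that reuses the backfill; its open slots drop to zero.
}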

@ -78,11 +78,7 @@ func (b *BattleRoyalScenario) Ticket() *pb.Ticket {
}
}
func (b *BattleRoyalScenario) Backfill() *pb.Backfill {
return nil
}
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
const playersInMatch = 100
tickets := poolTickets[poolName]
@ -105,7 +101,7 @@ func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolBackfills ma
func (b *BattleRoyalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and receiving at the
// TODO: once the evaluator client supports sending and recieving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}

@ -33,7 +33,7 @@ func Scenario() *FirstMatchScenario {
type FirstMatchScenario struct {
}
func (*FirstMatchScenario) Profiles() []*pb.MatchProfile {
func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
return []*pb.MatchProfile{
{
Name: "entirePool",
@ -46,15 +46,11 @@ func (*FirstMatchScenario) Profiles() []*pb.MatchProfile {
}
}
func (*FirstMatchScenario) Ticket() *pb.Ticket {
func (_ *FirstMatchScenario) Ticket() *pb.Ticket {
return &pb.Ticket{}
}
func (*FirstMatchScenario) Backfill() *pb.Backfill {
return nil
}
func (*FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
tickets := poolTickets[poolName]
var matches []*pb.Match
@ -72,10 +68,10 @@ func (*FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[s
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
// previously accepted match. Essentially first to claim the ticket wins.
func (*FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
func (_ *FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and receiving at the
// TODO: once the evaluator client supports sending and recieving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}

@ -19,15 +19,16 @@ import (
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"open-match.dev/open-match/examples/scale/scenarios/backfill"
"open-match.dev/open-match/examples/scale/scenarios/battleroyal"
"open-match.dev/open-match/examples/scale/scenarios/firstmatch"
"open-match.dev/open-match/examples/scale/scenarios/teamshooter"
"open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
var (
queryServiceAddress = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
queryServiceAddress = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
logger = logrus.WithFields(logrus.Fields{
"app": "scale",
@ -39,14 +40,11 @@ type GameScenario interface {
// Ticket creates a new ticket, with randomized parameters.
Ticket() *pb.Ticket
// Backfill creates a new backfill, with randomized parameters.
Backfill() *pb.Backfill
// Profiles lists all of the profiles that should run.
Profiles() []*pb.MatchProfile
// MatchFunction is the custom logic implementation of the match function.
MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
// Evaluate is the custom logic implementation of the evaluator.
Evaluate(stream pb.Evaluator_EvaluateServer) error
@ -58,26 +56,18 @@ var ActiveScenario = func() *Scenario {
// TODO: Select which scenario to use based on some configuration or choice,
// so it's easier to run different scenarios without changing code.
//gs = battleroyal.Scenario()
//gs = teamshooter.Scenario()
s := backfill.Scenario()
gs = s
gs = battleroyal.Scenario()
gs = teamshooter.Scenario()
return &Scenario{
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
FrontendCreatesBackfillsOnStart: true,
FrontendTotalBackfillsToCreate: 1000,
FrontendDeletesTickets: true,
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
BackendAssignsTickets: false,
BackendAcknowledgesBackfills: true,
BackendDeletesBackfills: true,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
Ticket: gs.Ticket,
Backfill: gs.Backfill,
BackfillDeleteCond: s.BackfillDeleteCond,
Profiles: gs.Profiles,
Ticket: gs.Ticket,
Profiles: gs.Profiles,
MMF: queryPoolsWrapper(gs.MatchFunction),
Evaluator: gs.Evaluate,
@ -97,23 +87,17 @@ type Scenario struct {
// TicketExtensionSize int
// PendingTicketNumber int
// MatchExtensionSize int
FrontendTicketCreatedQPS uint32
FrontendTotalTicketsToCreate int // TotalTicketsToCreate = -1 let scale-frontend create tickets forever
FrontendTotalBackfillsToCreate int
FrontendCreatesBackfillsOnStart bool
FrontendDeletesTickets bool
FrontendTotalTicketsToCreate int // TotalTicketsToCreate = -1 let scale-frontend create tickets forever
FrontendTicketCreatedQPS uint32
// GameBackend Configs
// ProfileNumber int
// FilterNumber int
BackendAssignsTickets bool
BackendAcknowledgesBackfills bool
BackendDeletesBackfills bool
BackendAssignsTickets bool
BackendDeletesTickets bool
Ticket func() *pb.Ticket
Backfill func() *pb.Backfill
BackfillDeleteCond func(*pb.Backfill) bool
Profiles func() []*pb.MatchProfile
Ticket func() *pb.Ticket
Profiles func() []*pb.MatchProfile
MMF matchFunction
Evaluator evaluatorFunction
@ -138,7 +122,7 @@ func getQueryServiceGRPCClient() pb.QueryServiceClient {
return pb.NewQueryServiceClient(conn)
}
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
var q pb.QueryServiceClient
var startQ sync.Once
@ -152,12 +136,7 @@ func queryPoolsWrapper(mmf func(req *pb.MatchProfile, poolBackfills map[string][
return err
}
poolBackfills, err := matchfunction.QueryBackfillPools(stream.Context(), q, req.GetProfile().GetPools())
if err != nil {
return err
}
proposals, err := mmf(req.GetProfile(), poolBackfills, poolTickets)
proposals, err := mmf(req.GetProfile(), poolTickets)
if err != nil {
return err
}

@ -154,13 +154,9 @@ func (t *TeamShooterScenario) Ticket() *pb.Ticket {
}
}
func (t *TeamShooterScenario) Backfill() *pb.Backfill {
return nil
}
// MatchFunction puts tickets into matches based on their skill, finding the
// required number of tickets for a game within the maximum skill difference.
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
skill := func(t *pb.Ticket) float64 {
return t.SearchFields.DoubleArgs[skillArg]
}

79
go.mod

@ -18,49 +18,56 @@ module open-match.dev/open-match
go 1.14
require (
contrib.go.opencensus.io/exporter/jaeger v0.2.1
contrib.go.opencensus.io/exporter/ocagent v0.7.0
contrib.go.opencensus.io/exporter/prometheus v0.2.0
contrib.go.opencensus.io/exporter/stackdriver v0.13.4
github.com/Bose/minisentinel v0.0.0-20200130220412-917c5a9223bb
cloud.google.com/go v0.47.0 // indirect
contrib.go.opencensus.io/exporter/jaeger v0.1.0
contrib.go.opencensus.io/exporter/ocagent v0.6.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0
contrib.go.opencensus.io/exporter/stackdriver v0.12.8
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71
github.com/TV4/logrus-stackdriver-formatter v0.1.0
github.com/alicebob/miniredis/v2 v2.14.1
github.com/aws/aws-sdk-go v1.35.26 // indirect
github.com/alicebob/miniredis/v2 v2.11.0
github.com/apache/thrift v0.13.0 // indirect
github.com/aws/aws-sdk-go v1.25.27 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/fsnotify/fsnotify v1.4.9
github.com/go-redsync/redsync/v4 v4.3.0
github.com/fsnotify/fsnotify v1.4.7
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/protobuf v1.4.3
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
github.com/golang/protobuf v1.3.2
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible
github.com/googleapis/gnostic v0.3.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/imdario/mergo v0.3.11 // indirect
github.com/pelletier/go-toml v1.8.1 // indirect
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.8.0
github.com/pseudomuto/protoc-gen-doc v1.5.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
github.com/grpc-ecosystem/grpc-gateway v1.12.0
github.com/imdario/mergo v0.3.8 // indirect
github.com/json-iterator/go v1.1.8 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/pelletier/go-toml v1.6.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.2.1
github.com/pseudomuto/protoc-gen-doc v1.3.2 // indirect
github.com/rs/xid v1.2.1
github.com/sirupsen/logrus v1.7.0
github.com/spf13/afero v1.4.1 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/spf13/afero v1.2.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.7.0
go.opencensus.io v0.23.0
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 // indirect
google.golang.org/api v0.35.0 // indirect
google.golang.org/genproto v0.0.0-20210224155714-063164c882e6
google.golang.org/grpc v1.36.0
google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.5.0
github.com/stretchr/testify v1.4.0
go.opencensus.io v0.22.1
golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 // indirect
golang.org/x/net v0.0.0-20191105084925-a882066a44e0
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
google.golang.org/api v0.13.0 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6
google.golang.org/grpc v1.25.0
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/api v0.0.0-20191004102349-159aefb8556b // kubernetes-1.14.10
k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689 // kubernetes-1.14.10
k8s.io/client-go v11.0.1-0.20191029005444-8e4128053008+incompatible // kubernetes-1.14.10
gopkg.in/yaml.v2 v2.2.5 // indirect
k8s.io/api v0.0.0-20191004102255-dacd7df5a50b // kubernetes-1.13.12
k8s.io/apimachinery v0.0.0-20191004074956-01f8b7d1121a // kubernetes-1.13.12
k8s.io/client-go v0.0.0-20191004102537-eb5b9a8cfde7 // kubernetes-1.13.12
k8s.io/klog v1.0.0 // indirect
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
sigs.k8s.io/yaml v1.1.0 // indirect
)

697
go.sum

File diff suppressed because it is too large

@ -13,13 +13,13 @@
# limitations under the License.
apiVersion: v2
appVersion: "1.3.0"
version: 1.3.0
appVersion: "1.0.0"
version: 1.0.0
name: open-match
dependencies:
- name: redis
version: 12.3.3
repository: https://charts.bitnami.com/bitnami
version: 9.5.0
repository: https://kubernetes-charts.storage.googleapis.com/
condition: open-match-core.redis.enabled
- name: open-match-telemetry
version: 0.0.0-dev

@ -1,20 +0,0 @@
{*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*}
{{/* vim: set filetype=mustache: */}}
{{- define "openmatchcustomize.function.hostName" -}}
{{- .Values.function.hostName | default (printf "%s-function" (include "openmatch.fullname" . ) ) -}}
{{- end -}}

@ -18,7 +18,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -46,20 +46,20 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.evaluator.hostName" . }}
name: {{ .Values.evaluator.hostName }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "openmatch.name" . }}
@ -82,13 +82,12 @@ spec:
component: evaluator
release: {{ .Release.Name }}
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.evaluatorConfigs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.evaluatorConfigs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.evaluator.hostName" . }}
- name: {{ .Values.evaluator.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.evaluatorConfigs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -18,7 +18,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -46,20 +46,20 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatchcustomize.function.hostName" . }}
name: {{ .Values.function.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -83,13 +83,12 @@ spec:
component: matchfunction
release: {{ .Release.Name }}
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.mmfConfigs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.mmfConfigs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatchcustomize.function.hostName" . }}
- name: {{ .Values.function.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.mmfConfigs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -35,13 +35,11 @@ evaluatorConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: om-configmap-default
customize:
volumeName: om-config-volume-override
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: om-configmap-override
mmfConfigs:
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,
@ -50,10 +48,8 @@ mmfConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: om-configmap-default
customize:
volumeName: om-config-volume-override
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: om-configmap-override

@ -18,15 +18,13 @@
"links": [],
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 28,
"panels": [],
"id": 16,
"title": "Iterations",
"type": "row"
},
@ -132,317 +130,11 @@
"x": 0,
"y": 9
},
"id": 16,
"panels": [],
"title": "Backfills",
"type": "row"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 10
},
"id": 30,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_backend_backfills_deleted[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "Backfilld Deleted per second",
"refId": "B"
},
{
"expr": "sum(rate(scale_backend_backfill_deletes_failed[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backfill Deletions Failed per second",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Backfill Deletion",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 18
},
"id": 14,
"panels": [],
"title": "Tickets",
"type": "row"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 19
},
"id": 26,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": false,
"min": false,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "99%-ile",
"refId": "A"
},
{
"expr": "histogram_quantile(0.95, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "95%-ile",
"refId": "B"
},
{
"expr": "histogram_quantile(0.90, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "90%-ile",
"refId": "C"
},
{
"expr": "histogram_quantile(0.50, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "50%-ile",
"refId": "D"
},
{
"expr": "histogram_quantile(0.10, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "10%-ile",
"refId": "E"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Ticket Time to Assignment",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "ms",
"label": null,
"logBase": 2,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 19
},
"id": 12,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_backend_sum_tickets_returned[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backend Tickets in Matches pers second",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Tickets In Matches",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
@ -454,7 +146,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 27
"y": 10
},
"id": 2,
"legend": {
@ -550,12 +242,12 @@
"dashes": false,
"fill": 1,
"gridPos": {
"h": 8,
"h": 9,
"w": 12,
"x": 12,
"y": 28
"y": 10
},
"id": 22,
"id": 12,
"legend": {
"avg": false,
"current": false,
@ -580,26 +272,18 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_frontend_tickets_deleted[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "Backend Tickets Deleted per second",
"refId": "B"
},
{
"expr": "sum(rate(scale_frontend_ticket_deletes_failed[5m]))",
"expr": "sum(rate(scale_backend_sum_tickets_returned[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backend Ticket Deletions Failed per second",
"refId": "C"
"legendFormat": "Backend Tickets in Matches pers second",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Ticket Deletion",
"title": "Tickets In Matches",
"tooltip": {
"shared": true,
"sort": 0,
@ -647,7 +331,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 36
"y": 19
},
"id": 24,
"legend": {
@ -730,13 +414,106 @@
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 19
},
"id": 22,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_backend_tickets_deleted[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "Backend Tickets Deleted per second",
"refId": "B"
},
{
"expr": "sum(rate(scale_backend_ticket_deletes_failed[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backend Ticket Deletions Failed per second",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Ticket Deletion",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 44
"y": 27
},
"id": 18,
"panels": [],
@ -753,7 +530,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 45
"y": 28
},
"id": 6,
"legend": {
@ -839,7 +616,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 45
"y": 28
},
"id": 19,
"legend": {
@ -928,7 +705,7 @@
"h": 1,
"w": 24,
"x": 0,
"y": 53
"y": 36
},
"id": 21,
"panels": [],
@ -945,7 +722,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 54
"y": 37
},
"id": 8,
"legend": {
@ -1030,7 +807,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 54
"y": 37
},
"id": 10,
"legend": {
@ -1113,7 +890,7 @@
}
}
],
"refresh": "10s",
"refresh": "",
"schemaVersion": 18,
"style": "dark",
"tags": [],

@ -1,42 +0,0 @@
{*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*}
{{/* vim: set filetype=mustache: */}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "openmatchscale.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{- define "openmatchscale.scaleBackend.hostName" -}}
{{- .Values.scaleBackend.hostName | default (printf "%s-backend" (include "openmatchscale.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatchscale.scaleFrontend.hostName" -}}
{{- .Values.scaleFrontend.hostName | default (printf "%s-frontend" (include "openmatchscale.fullname" . ) ) -}}
{{- end -}}

@ -15,7 +15,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatchscale.scaleBackend.hostName" . }}
name: {{ .Values.scaleBackend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -34,7 +34,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatchscale.scaleBackend.hostName" . }}
name: {{ .Values.scaleBackend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -59,11 +59,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatchscale.scaleBackend.hostName" . }}
- name: {{ .Values.scaleBackend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -15,7 +15,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
name: {{ .Values.scaleFrontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -34,7 +34,7 @@ spec:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
name: {{ .Values.scaleFrontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -59,11 +59,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
- name: {{ .Values.scaleFrontend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatchscale.fullname" . }}-dashboard
name: open-match-scale-dashboard
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"

@ -13,13 +13,13 @@
# limitations under the License.
scaleFrontend:
hostName:
hostName: om-scale-frontend
httpPort: 51509
replicas: 1
image: openmatch-scale-frontend
scaleBackend:
hostName:
hostName: om-scale-backend
httpPort: 51509
replicas: 1
image: openmatch-scale-backend
@ -28,10 +28,8 @@ configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: om-configmap-default
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: om-configmap-override
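The two `configName` values removed above are template strings on purpose: the consuming helper runs them through `tpl`, so they resolve per release instead of being hard-coded. A minimal sketch of how the two halves fit together, using names that appear in this chart:

# values.yaml - quoted so YAML passes the template string through verbatim
configs:
  default:
    volumeName: om-config-volume-default
    mountPath: /app/config/default
    configName: '{{ include "openmatch.configmap.default" . }}'
# _helpers.tpl - `tpl` renders the string against the root context, so the
# volume ends up pointing at "<release fullname>-configmap-default":
#   configMap:
#     name: {{ tpl $configValues.configName $ }}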

@ -20,14 +20,14 @@ version: 0.0.0-dev
dependencies:
- name: prometheus
version: 9.2.0
repository: https://charts.helm.sh/stable
repository: https://kubernetes-charts.storage.googleapis.com/
condition: global.telemetry.prometheus.enabled,prometheus.enabled
- name: grafana
version: 4.0.1
repository: https://charts.helm.sh/stable
repository: https://kubernetes-charts.storage.googleapis.com/
condition: global.telemetry.grafana.enabled,grafana.enabled
- name: jaeger
version: 0.13.3
repository: https://charts.helm.sh/stable
repository: https://kubernetes-charts-incubator.storage.googleapis.com/
condition: global.telemetry.jaeger.enabled,jaeger.enabled

@ -62,7 +62,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (pod_name) (\n\nsum(\n rate(container_cpu_usage_seconds_total{container_name!=\"POD\"}[5m]) * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\"))\n) by (pod_name, container_name)\n\n/\n\nsum(\n (container_spec_cpu_quota{container_name!=\"POD\"} * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")))\n /\n (container_spec_cpu_period{container_name!=\"POD\"} * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")))\n) by (pod_name, container_name)\n\n*\n\n100\n)\n",
"expr": "avg by (pod_name) (\n sum(\n rate(container_cpu_usage_seconds_total{pod_name=~\"om-.*\", container_name!=\"POD\"}[5m])\n ) by (pod_name, container_name) \n \n /\n \n sum(\n container_spec_cpu_quota{pod_name=~\"om-.*\", container_name!=\"POD\"} / container_spec_cpu_period{pod_name=~\"om-.*\", container_name!=\"POD\"}\n ) by (pod_name, container_name) \n \n * \n \n 100\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{pod_name}}",
@ -155,7 +155,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (go_goroutines{app=~\"open-match\"})",
"expr": "avg by (component) (go_goroutines{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -256,7 +256,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component,app) (process_resident_memory_bytes{app=~\"open-match\"})",
"expr": "avg by (component,app) (process_resident_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - resident",
@ -265,7 +265,7 @@
"step": 4
},
{
"expr": "avg by (component,app) (process_virtual_memory_bytes{app=~\"open-match\"})",
"expr": "avg by (component,app) (process_virtual_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - virtual",
@ -365,7 +365,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(process_resident_memory_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(process_resident_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - resident",
@ -374,7 +374,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(process_virtual_memory_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(process_virtual_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - virtual",
@ -475,7 +475,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (go_memstats_alloc_bytes{app=~\"open-match\"})",
"expr": "avg by (component) (go_memstats_alloc_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - bytes allocated",
@ -484,7 +484,7 @@
"step": 4
},
{
"expr": "avg by (component) (rate(go_memstats_alloc_bytes_total{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (rate(go_memstats_alloc_bytes_total{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - alloc rate",
@ -493,7 +493,7 @@
"step": 4
},
{
"expr": "avg by (component) (go_memstats_stack_inuse_bytes{app=~\"open-match\"})",
"expr": "avg by (component) (go_memstats_stack_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - stack inuse",
@ -502,7 +502,7 @@
"step": 4
},
{
"expr": "avg by (component) (go_memstats_heap_inuse_bytes{app=~\"open-match\"})",
"expr": "avg by (component) (go_memstats_heap_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -604,7 +604,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(go_memstats_alloc_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_alloc_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - bytes allocated",
@ -613,7 +613,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(go_memstats_stack_inuse_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_stack_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - stack inuse",
@ -622,7 +622,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(go_memstats_heap_inuse_bytes{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_heap_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -719,7 +719,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (process_open_fds{app=~\"open-match\"})",
"expr": "avg by (component) (process_open_fds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -815,7 +815,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(process_open_fds{app=~\"open-match\"}[$interval]))",
"expr": "avg by (component) (deriv(process_open_fds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -911,7 +911,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component, quantile) (go_gc_duration_seconds{app=~\"open-match\"})",
"expr": "avg by (component, quantile) (go_gc_duration_seconds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}: {{quantile}}",

@ -348,14 +348,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m]) * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"redis\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\"))) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{pod_name=~\"om-redis.*\", name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m])) by (pod_name)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{pod_name}} usage",
"refId": "A"
},
{
"expr": "sum(kube_pod_container_resource_limits_cpu_cores * on (pod) group_left(label_app) max by (pod, label_app) (kube_pod_labels{label_app=\"redis\"})) by (pod)",
"expr": "sum(kube_pod_container_resource_limits_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@ -363,7 +363,7 @@
"refId": "B"
},
{
"expr": "sum(kube_pod_container_resource_requests_cpu_cores * on (pod) group_left(label_app) max by (pod, label_app) (kube_pod_labels{label_app=\"redis\"})) by (pod)",
"expr": "sum(kube_pod_container_resource_requests_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "request",

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.fullname" . }}-dashboards
name: open-match-dashboards
labels:
grafana_dashboard: "1"
data:

@ -1,31 +0,0 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.global.telemetry.grafana.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.fullname" . }}-datasource
labels:
grafana_datasource: "1"
data:
datasource.yaml: |-
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: {{ tpl .Values.global.telemetry.grafana.prometheusServer . }}
access: proxy
isDefault: true
{{- end }}

@ -142,10 +142,17 @@ grafana:
notifiers: {}
sidecar:
dashboards:
enabled: true
datasources:
enabled: true
enabled: true
plugins: grafana-piechart-panel
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://open-match-prometheus-server.{{ .Release.Namespace }}.svc.cluster.local:80/
access: proxy
isDefault: true
jaeger:
enabled: true
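Both hunks above provision the same Grafana datasource by different routes: the removed ConfigMap is picked up by the sidecar via its grafana_datasource: "1" label, while 1.0 inlines the datasource in the subchart values. Assuming, for illustration only, a release named open-match installed into the open-match namespace, either path yields roughly this provisioned datasource:

# Sketch of the effective Grafana provisioning (assumed release/namespace)
apiVersion: 1
datasources:
- name: Prometheus
  type: prometheus
  url: http://open-match-prometheus-server.open-match.svc.cluster.local:80/
  access: proxy
  isDefault: true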

@ -22,26 +22,6 @@ Expand the name of the chart.
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Instead of .Chart.Name, we hard-code "open-match" as we need to call this from subcharts, but get the
same result as if called from this chart.
*/}}
{{- define "openmatch.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default "open-match" .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Render chart metadata labels: "chart", "heritage" unless "openmatch.noChartMeta" is set.
*/}}
@ -77,7 +57,7 @@ resources:
{{- range $configIndex, $configValues := .configs }}
- name: {{ $configValues.volumeName }}
configMap:
name: {{ tpl $configValues.configName $ }}
name: {{ $configValues.configName }}
{{- end }}
{{- end -}}
@ -94,10 +74,10 @@ resources:
{{- if .Values.global.tls.enabled }}
- name: tls-server-volume
secret:
secretName: {{ include "openmatch.fullname" . }}-tls-server
secretName: om-tls-server
- name: root-ca-volume
secret:
secretName: {{ include "openmatch.fullname" . }}-tls-rootca
secretName: om-tls-rootca
{{- end -}}
{{- end -}}
@ -112,7 +92,7 @@ resources:
{{- if .Values.redis.usePassword }}
- name: redis-password
secret:
secretName: {{ include "call-nested" (list . "redis" "redis.fullname") }}
secretName: {{ .Values.redis.fullnameOverride }}
{{- end -}}
{{- end -}}
@ -155,72 +135,3 @@ minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.serviceAccount.name" -}}
{{- .Values.global.kubernetes.serviceAccount | default (printf "%s-unprivileged-service" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.swaggerui.hostName" -}}
{{- .Values.swaggerui.hostName | default (printf "%s-swaggerui" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.query.hostName" -}}
{{- .Values.query.hostName | default (printf "%s-query" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.frontend.hostName" -}}
{{- .Values.frontend.hostName | default (printf "%s-frontend" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.backend.hostName" -}}
{{- .Values.backend.hostName | default (printf "%s-backend" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.synchronizer.hostName" -}}
{{- .Values.synchronizer.hostName | default (printf "%s-synchronizer" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.evaluator.hostName" -}}
{{- .Values.evaluator.hostName | default (printf "%s-evaluator" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.configmap.default" -}}
{{- printf "%s-configmap-default" (include "openmatch.fullname" . ) -}}
{{- end -}}
{{- define "openmatch.configmap.override" -}}
{{- printf "%s-configmap-override" (include "openmatch.fullname" . ) -}}
{{- end -}}
{{- define "openmatch.jaeger.agent" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.agent.name") }}:6831
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "openmatch.jaeger.collector" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
http://{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.collector.name") }}:14268/api/traces
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Call templates from sub-charts in a synthesized context, workaround for https://github.com/helm/helm/issues/3920
Mainly useful for things like `{{ include "call-nested" (list . "redis" "redis.fullname") }}`
https://github.com/helm/helm/issues/4535#issuecomment-416022809
https://github.com/helm/helm/issues/4535#issuecomment-477778391
*/}}
{{- define "call-nested" }}
{{- $dot := index . 0 }}
{{- $subchart := index . 1 | splitList "." }}
{{- $template := index . 2 }}
{{- $values := $dot.Values }}
{{- range $subchart }}
{{- $values = index $values . }}
{{- end }}
{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }}
{{- end }}
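As the removed comment above notes, `call-nested` exists to invoke a subchart's named templates with that subchart's values in scope. A short sketch of the practical difference, using the Redis hostname that appears later in this diff (illustrative only):

# master: derive the subchart's resource name, so it tracks the release name
hostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
# 1.0: pin the subchart name in values and reference it directly
#   redis:
#     fullnameOverride: om-redis
hostname: {{ .Values.redis.fullnameOverride }}-master.{{ .Release.Namespace }}.svc.cluster.local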

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.backend.hostName" . }}
name: {{ .Values.backend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.backend.hostName" . }}
- name: {{ .Values.backend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.frontend.hostName" . }}
name: {{ .Values.frontend.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.frontend.hostName" . }}
- name: {{ .Values.frontend.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.configmap.default" . }}
name: om-configmap-default
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -50,28 +50,28 @@ data:
api:
backend:
hostname: "{{ include "openmatch.backend.hostName" . }}"
hostname: "{{ .Values.backend.hostName }}"
grpcport: "{{ .Values.backend.grpcPort }}"
httpport: "{{ .Values.backend.httpPort }}"
frontend:
hostname: "{{ include "openmatch.frontend.hostName" . }}"
hostname: "{{ .Values.frontend.hostName }}"
grpcport: "{{ .Values.frontend.grpcPort }}"
httpport: "{{ .Values.frontend.httpPort }}"
query:
hostname: "{{ include "openmatch.query.hostName" . }}"
hostname: "{{ .Values.query.hostName }}"
grpcport: "{{ .Values.query.grpcPort }}"
httpport: "{{ .Values.query.httpPort }}"
synchronizer:
hostname: "{{ include "openmatch.synchronizer.hostName" . }}"
hostname: "{{ .Values.synchronizer.hostName }}"
grpcport: "{{ .Values.synchronizer.grpcPort }}"
httpport: "{{ .Values.synchronizer.httpPort }}"
swaggerui:
hostname: "{{ include "openmatch.swaggerui.hostName" . }}"
hostname: "{{ .Values.swaggerui.hostName }}"
httpport: "{{ .Values.swaggerui.httpPort }}"
# Configurations for api.test and api.scale are used for testing.
test:
hostname: "{{ include "openmatch.fullname" . }}-test"
hostname: "test"
grpcport: "50509"
httpport: "51509"
scale:
@ -90,11 +90,11 @@ data:
{{- if index .Values "redis" "sentinel" "enabled"}}
sentinelPort: {{ .Values.redis.sentinel.port }}
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
sentinelHostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}
sentinelHostname: {{ .Values.redis.fullnameOverride }}
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
{{- else}}
# Open Match's default Redis setups
hostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
hostname: {{ .Values.redis.fullnameOverride }}-master.{{ .Release.Namespace }}.svc.cluster.local
port: {{ .Values.redis.redisPort }}
user: {{ .Values.redis.user }}
{{- end}}
@ -119,13 +119,8 @@ data:
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
jaeger:
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
{{- if .Values.global.telemetry.jaeger.enabled }}
agentEndpoint: "{{ tpl .Values.global.telemetry.jaeger.agentEndpoint . }}"
collectorEndpoint: "{{ tpl .Values.global.telemetry.jaeger.collectorEndpoint . }}"
{{- else }}
agentEndpoint: ""
collectorEndpoint: ""
{{- end }}
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
prometheus:
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"
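To make the mapping concrete, this is roughly what the 1.0 ConfigMap above renders to with the default hostnames and ports that appear later in this diff (non-sentinel Redis branch; the open-match namespace is assumed for illustration):

api:
  backend:
    hostname: "om-backend"
    grpcport: "50505"
    httpport: "51505"
  query:
    hostname: "om-query"
    grpcport: "50503"
    httpport: "51503"
redis:
  hostname: om-redis-master.open-match.svc.cluster.local
  port: 6379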

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.configmap.override" . }}
name: om-configmap-override
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,10 +40,9 @@ data:
assignedDeleteTimeout: {{ index .Values "open-match-core" "assignedDeleteTimeout" }}
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: {{ index .Values "open-match-core" "queryPageSize" }}
backfillLockTimeout: {{ index .Values "open-match-core" "backfillLockTimeout" }}
api:
evaluator:
hostname: "{{ include "openmatch.evaluator.hostName" . }}"
hostname: "{{ .Values.evaluator.hostName }}"
grpcport: "{{ .Values.evaluator.grpcPort }}"
httpport: "{{ .Values.evaluator.httpPort }}"
{{- end }}

@ -14,11 +14,11 @@
{{- if index .Values "open-match-core" "enabled" }}
{{- if empty .Values.ci }}
# This is the least restricted PSP used to create privileged pods to disable THP in host kernel.
# om-redis-podsecuritypolicy is the least restricted PSP used to create privileged pods to disable THP in host kernel.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
name: om-redis-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
@ -51,11 +51,11 @@ spec:
fsGroup:
rule: 'RunAsAny'
---
# This does not allow creating privileged pods and restricts bound pods to use the specified port ranges.
# om-core-podsecuritypolicy does not allow creating privileged pods and restricts bound pods to use the specified port ranges.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
name: om-core-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.query.hostName" . }}
name: {{ .Values.query.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8 }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.query.hostName" . }}
- name: {{ .Values.query.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -29,7 +29,7 @@ metadata:
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "openmatch.serviceAccount.name" . }}
name: {{ .Values.global.kubernetes.serviceAccount }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,26 +40,28 @@ automountServiceAccountToken: true
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-service-role
name: om-service-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-service-role to use om-core-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
- om-core-podsecuritypolicy
verbs:
- use
---
# This applies om-service-role to the open-match unprivileged service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-service-role-binding
name: om-service-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -71,33 +73,34 @@ subjects:
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-service-role
name: om-service-role
apiGroup: rbac.authorization.k8s.io
---
{{- if index .Values "open-match-core" "redis" "enabled" }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-redis-role
name: om-redis-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-redis-role to use om-redis-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
- om-redis-podsecuritypolicy
verbs:
- use
---
# This applies om-redis role to the om-redis privileged service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-redis-role-binding
name: om-redis-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -105,11 +108,10 @@ metadata:
release: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ include "call-nested" (list . "redis" "redis.serviceAccountName") }}
name: {{ .Values.redis.serviceAccount.name }} # Redis service account
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-redis-role
name: om-redis-role
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
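For readers tracing the fixed 1.0 names introduced above, the RBAC chain this file wires up is roughly the following (a sketch; arrows are illustrative):

# ServiceAccount open-match-unprivileged-service
#   <- RoleBinding om-service-role-binding
#        -> Role om-service-role ("use" on PodSecurityPolicy om-core-podsecuritypolicy)
#
# ServiceAccount open-match-redis-service            (when open-match-core.redis.enabled)
#   <- RoleBinding om-redis-role-binding
#        -> Role om-redis-role ("use" on PodSecurityPolicy om-redis-podsecuritypolicy)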

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.swaggerui.hostName" . }}
name: {{ .Values.swaggerui.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -36,7 +36,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.swaggerui.hostName" . }}
name: {{ .Values.swaggerui.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -61,11 +61,11 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.swaggerui.hostName" . }}
- name: {{ .Values.swaggerui.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.synchronizer.hostName" . }}
name: {{ .Values.synchronizer.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,7 +40,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatch.synchronizer.hostName" . }}
name: {{ .Values.synchronizer.hostName }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -66,12 +66,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8 }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
containers:
- name: {{ include "openmatch.synchronizer.hostName" . }}
- name: {{ .Values.synchronizer.hostName }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

@ -14,10 +14,11 @@
{{- if .Values.ci }}
# This applies om-test-role to the open-match-test-service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-test-role-binding
name: om-test-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -25,11 +26,11 @@ metadata:
release: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ include "openmatch.fullname" . }}-test-service
name: open-match-test-service
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-test-role
name: om-test-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

@ -17,22 +17,23 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-test-role
name: om-test-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-test-role to use om-core-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
- om-core-podsecuritypolicy
verbs:
- use
# Grant this role get & list permission for k8s endpoints and pods resources
# Grant om-test-role get & list permission for k8s endpoints and pods resources
# Required for e2e in-cluster testing.
- apiGroups:
- ""

@ -14,11 +14,11 @@
{{- if .Values.ci }}
# Create a service account for test services.
# Create a service account for open-match-test services.
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "openmatch.fullname" . }}-test-service
name: open-match-test-service
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:

@ -17,7 +17,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "openmatch.fullname" . }}-test
name: test
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,7 +40,7 @@ spec:
apiVersion: v1
kind: Pod
metadata:
name: {{ include "openmatch.fullname" . }}-test
name: test
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
@ -52,19 +52,19 @@ metadata:
spec:
# Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.
activeDeadlineSeconds: 900
serviceAccountName: {{ include "openmatch.fullname" . }}-test-service
serviceAccountName: open-match-test-service
automountServiceAccountToken: true
volumes:
- configMap:
defaultMode: 420
name: {{ include "openmatch.configmap.default" . }}
name: om-configmap-default
name: om-config-volume-default
- configMap:
defaultMode: 420
name: {{ include "openmatch.configmap.override" . }}
name: om-configmap-override
name: om-config-volume-override
containers:
- name: {{ include "openmatch.fullname" . }}-test
- name: "test"
volumeMounts:
- mountPath: /app/config/default
name: om-config-volume-default

@ -17,7 +17,7 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "openmatch.fullname" . }}-tls-rootca
name: om-tls-rootca
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -31,9 +31,9 @@ data:
apiVersion: v1
kind: Secret
metadata:
name: {{ include "openmatch.fullname" . }}-tls-server
name: om-tls-server
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
annotations: {{- include "openmatch.chartmeta" . | nindent 2 }}
labels:
app: {{ template "openmatch.name" . }}
component: tls

@ -23,7 +23,7 @@
# Begins the configuration section for `query` component in Open Match.
# query:
#
# # Override the default in-cluster domain name for the `query` service to om-query.
# # Specifies om-query as the in-cluster domain name for the `query` service.
# hostName: om-query
#
# # Specifies the port for receiving RESTful HTTP requests in the `query` service.
@ -44,68 +44,67 @@
# # Specifies the image name to be used in a Kubernetes pod for `query` component.
# image: openmatch-query
swaggerui: &swaggerui
hostName:
hostName: om-swaggerui
httpPort: 51500
portType: ClusterIP
replicas: 1
image: openmatch-swaggerui
query: &query
hostName:
hostName: om-query
grpcPort: 50503
httpPort: 51503
portType: ClusterIP
replicas: 3
image: openmatch-query
frontend: &frontend
hostName:
hostName: om-frontend
grpcPort: 50504
httpPort: 51504
portType: ClusterIP
replicas: 3
image: openmatch-frontend
backend: &backend
hostName:
hostName: om-backend
grpcPort: 50505
httpPort: 51505
portType: ClusterIP
replicas: 3
image: openmatch-backend
synchronizer: &synchronizer
hostName:
hostName: om-synchronizer
grpcPort: 50506
httpPort: 51506
portType: ClusterIP
replicas: 1
image: openmatch-synchronizer
evaluator: &evaluator
hostName:
hostName: om-evaluator
grpcPort: 50508
httpPort: 51508
replicas: 3
function: &function
hostName:
hostName: om-function
grpcPort: 50502
httpPort: 51502
replicas: 3
# Specifies the location and name of the Open Match application-level config volumes.
# Used in template: `openmatch.volumemounts.configs` and `openmatch.volumes.configs` under `templates/_helpers.tpl` file.
# Used in template: `openmatch.volumemounts.configs` and `openmatch.volumes.configs` under `templates/_helpers.tpl` file.
configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: om-configmap-default
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: om-configmap-override
# Override Redis settings
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
redis:
fullnameOverride: om-redis
redisPort: 6379
usePassword: false
usePasswordFile: false
@ -134,6 +133,7 @@ redis:
slaveCount: 3
serviceAccount:
create: true
name: open-match-redis-service
slave:
persistence:
enabled: false
@ -174,7 +174,7 @@ open-match-core:
enabled: true
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after match function has started before it will be canceled,
@ -188,8 +188,6 @@ open-match-core:
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
# Duration for redis locks to expire.
backfillLockTimeout: 1m
redis:
enabled: true
@ -197,7 +195,7 @@ open-match-core:
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
user:
user:
pool:
maxIdle: 500
maxActive: 500
@ -210,6 +208,8 @@ open-match-core:
open-match-scale:
# Switch the value between true/false to turn on/off this subchart
enabled: false
frontend: *frontend
backend: *backend
# Controls if users need to install the monitoring tools in Open Match.
open-match-telemetry:
@ -222,6 +222,7 @@ open-match-customize:
enabled: false
evaluator: *evaluator
function: *function
query: *query
# You can override the evaluator/mmf image
# evaluator:
# image: [YOUR_EVALUATOR_IMAGE]
@ -248,8 +249,8 @@ global:
limits:
memory: 3Gi
cpu: 2
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Defines a service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount: open-match-unprivileged-service
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -271,9 +272,10 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 0.0.0-dev
tag: 1.0.0
pullPolicy: Always
# Expose the telemetry configurations to all subcharts because prometheus, for example,
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
@ -284,8 +286,8 @@ global:
enabled: true
jaeger:
enabled: false
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
prometheus:
enabled: false
endpoint: "/metrics"
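The core matchmaking knobs documented in this values file are plain Helm values; an illustrative override file a deployment might pass with `helm install -f`, limited to fields shown in this diff:

# my-values.yaml (hypothetical override file)
open-match-core:
  registrationInterval: 250ms   # window in which FetchMatches calls join one evaluation cycle
  assignedDeleteTimeout: 10m    # assigned tickets are deleted after this duration
  queryPageSize: 10000          # max tickets per QueryTicketsResponse
global:
  kubernetes:
    serviceAccount: open-match-unprivileged-service
  image:
    tag: 1.0.0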

@ -23,7 +23,7 @@
# Begins the configuration section for `query` component in Open Match.
# query:
#
# # Override the default in-cluster domain name for the `query` service to om-query.
# # Specifies om-query as the in-cluster domain name for the `query` service.
# hostName: om-query
#
# # Specifies the port for receiving RESTful HTTP requests in the `query` service.
@ -44,46 +44,46 @@
# # Specifies the image name to be used in a Kubernetes pod for `query` component.
# image: openmatch-query
swaggerui: &swaggerui
hostName:
hostName: om-swaggerui
httpPort: 51500
portType: ClusterIP
replicas: 1
image: openmatch-swaggerui
query: &query
hostName:
hostName: om-query
grpcPort: 50503
httpPort: 51503
portType: ClusterIP
replicas: 3
image: openmatch-query
frontend: &frontend
hostName:
hostName: om-frontend
grpcPort: 50504
httpPort: 51504
portType: ClusterIP
replicas: 3
image: openmatch-frontend
backend: &backend
hostName:
hostName: om-backend
grpcPort: 50505
httpPort: 51505
portType: ClusterIP
replicas: 3
image: openmatch-backend
synchronizer: &synchronizer
hostName:
hostName: om-synchronizer
grpcPort: 50506
httpPort: 51506
portType: ClusterIP
replicas: 1
image: openmatch-synchronizer
evaluator: &evaluator
hostName:
hostName: om-evaluator
grpcPort: 50508
httpPort: 51508
replicas: 3
function: &function
hostName:
hostName: om-function
grpcPort: 50502
httpPort: 51502
replicas: 3
@ -94,18 +94,17 @@ configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
configName: om-configmap-default
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
configName: om-configmap-override
# Override Redis settings
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
redis:
fullnameOverride: om-redis
redisPort: 6379
usePassword: false
usePasswordFile: false
@ -117,7 +116,6 @@ redis:
enabled: true
masterSet: om-redis-master
port: 26379
usePassword: false
master:
disableCommands: [] # don't disable 'FLUSH-' commands
resources:
@ -130,6 +128,7 @@ redis:
slaveCount: 2
serviceAccount:
create: true
name: open-match-redis-service
sysctlImage:
# Enable this setting in production if you are running Open Match under Linux environment
enabled: false
@ -160,7 +159,7 @@ open-match-core:
enabled: true
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after match function has started before it will be canceled,
@ -174,8 +173,6 @@ open-match-core:
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
# Duration for redis locks to expire.
backfillLockTimeout: 1m
redis:
enabled: true
@ -183,7 +180,7 @@ open-match-core:
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
user:
user:
pool:
maxIdle: 200
maxActive: 0
@ -196,6 +193,8 @@ open-match-core:
open-match-scale:
# Switch the value between true/false to turn on/off this subchart
enabled: false
frontend: *frontend
backend: *backend
# Controls if users need to install the monitoring tools in Open Match.
open-match-telemetry:
@ -208,6 +207,7 @@ open-match-customize:
enabled: false
evaluator: *evaluator
function: *function
query: *query
# You can override the evaluator/mmf image
# evaluator:
# image: [YOUR_EVALUATOR_IMAGE]
@ -234,8 +234,8 @@ global:
limits:
memory: 100Mi
cpu: 100m
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Defines a service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount: open-match-unprivileged-service
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -257,9 +257,10 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 1.3.0
tag: 1.0.0
pullPolicy: Always
# Expose the telemetry configurations to all subcharts because prometheus, for example,
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
@ -270,8 +271,8 @@ global:
enabled: true
jaeger:
enabled: false
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
prometheus:
enabled: false
endpoint: "/metrics"
@ -281,5 +282,3 @@ global:
prefix: "open_match"
grafana:
enabled: false
# This will be called with `tpl` in the open-match-telemetry subchart namespace.
prometheusServer: 'http://{{ include "call-nested" (list . "prometheus" "prometheus.server.fullname") }}.{{ .Release.Namespace }}.svc.cluster.local:80/'

@ -1,26 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package openmatch.internal;
option go_package = "open-match.dev/open-match/internal/ipb";
import "api/messages.proto";
message BackfillInternal {
// Represents a backfill entity which is used to fill partially full matches
openmatch.Backfill backfill = 1;
// List of ticket IDs associated with a current backfill
repeated string ticket_ids = 2;
}

@ -26,11 +26,10 @@ import (
)
var (
totalBytesPerMatch = stats.Int64("open-match.dev/backend/total_bytes_per_match", "Total bytes per match", stats.UnitBytes)
ticketsPerMatch = stats.Int64("open-match.dev/backend/tickets_per_match", "Number of tickets per match", stats.UnitDimensionless)
ticketsReleased = stats.Int64("open-match.dev/backend/tickets_released", "Number of tickets released per request", stats.UnitDimensionless)
ticketsAssigned = stats.Int64("open-match.dev/backend/tickets_assigned", "Number of tickets assigned per request", stats.UnitDimensionless)
ticketsTimeToAssignment = stats.Int64("open-match.dev/backend/ticket_time_to_assignment", "Time to assignment for tickets", stats.UnitMilliseconds)
totalBytesPerMatch = stats.Int64("open-match.dev/backend/total_bytes_per_match", "Total bytes per match", stats.UnitBytes)
ticketsPerMatch = stats.Int64("open-match.dev/backend/tickets_per_match", "Number of tickets per match", stats.UnitDimensionless)
ticketsReleased = stats.Int64("open-match.dev/backend/tickets_released", "Number of tickets released per request", stats.UnitDimensionless)
ticketsAssigned = stats.Int64("open-match.dev/backend/tickets_assigned", "Number of tickets assigned per request", stats.UnitDimensionless)
totalMatchesView = &view.View{
Measure: totalBytesPerMatch,
@ -62,13 +61,6 @@ var (
Description: "Number of tickets released per request",
Aggregation: view.Sum(),
}
ticketsTimeToAssignmentView = &view.View{
Measure: ticketsTimeToAssignment,
Name: "open-match.dev/backend/ticket_time_to_assignment",
Description: "Time to assignment for tickets",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
)
// BindService creates the backend service and binds it to the serving harness.
@ -89,7 +81,6 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
ticketsPerMatchView,
ticketsAssignedView,
ticketsReleasedView,
ticketsTimeToAssignmentView,
)
return nil
}

@ -22,15 +22,12 @@ import (
"net/http"
"strings"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/pkg/errors"
"github.com/rs/xid"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
@ -56,7 +53,6 @@ var (
"app": "openmatch",
"component": "app.backend",
})
errBackfillGenerationMismatch = errors.New("backfill generation mismatch")
)
// FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile
@ -91,7 +87,7 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
return synchronizeSend(ctx, syncStream, m, proposals)
})
eg.Go(func() error {
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs, s.store)
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs)
})
var mmfErr error
@ -106,8 +102,13 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
// TODO: Send mmf error in FetchSummary instead of erroring call.
if syncErr != nil || mmfErr != nil {
logger.WithFields(logrus.Fields{
"syncErr": syncErr,
"mmfErr": mmfErr,
}).Error("error(s) in FetchMatches call.")
return fmt.Errorf(
"error(s) in FetchMatches call. syncErr=[%v], mmfErr=[%v]",
"error(s) in FetchMatches call. syncErr=[%s], mmfErr=[%s]",
syncErr,
mmfErr,
)
@ -144,7 +145,7 @@ sendProposals:
return nil
}
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc, store statestore.Service) error {
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc) error {
var startMmfsOnce sync.Once
for {
@ -171,31 +172,6 @@ func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync
if !ok {
return fmt.Errorf("error casting sync map value into *pb.Match: %w", err)
}
backfill := match.GetBackfill()
if backfill != nil {
ticketIds := make([]string, 0, len(match.Tickets))
for _, t := range match.Tickets {
ticketIds = append(ticketIds, t.Id)
}
err = createOrUpdateBackfill(ctx, backfill, ticketIds, store)
if err != nil {
e, ok := status.FromError(err)
if err == errBackfillGenerationMismatch || (ok && e.Code() == codes.NotFound) {
err = doReleaseTickets(ctx, ticketIds, store)
if err != nil {
logger.WithError(err).Errorf("failed to remove match tickets from pending release: %v", ticketIds)
}
continue
}
return errors.Wrapf(err, "failed to handle match backfill: %s", match.MatchId)
}
}
stats.Record(ctx, totalBytesPerMatch.M(int64(proto.Size(match))))
stats.Record(ctx, ticketsPerMatch.M(int64(len(match.GetTickets()))))
err = stream.Send(&pb.FetchMatchesResponse{Match: match})
@ -225,13 +201,17 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
var conn *grpc.ClientConn
conn, err := cc.GetGRPC(address)
if err != nil {
return status.Error(codes.InvalidArgument, "failed to establish grpc client connection to match function")
logger.WithFields(logrus.Fields{
"error": err.Error(),
"function": address,
}).Error("failed to establish grpc client connection to match function")
return status.Error(codes.InvalidArgument, "failed to connect to match function")
}
client := pb.NewMatchFunctionClient(conn)
stream, err := client.Run(ctx, &pb.RunRequest{Profile: profile})
if err != nil {
err = errors.Wrap(err, "failed to run match function for profile")
logger.WithError(err).Error("failed to run match function for profile")
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
@ -245,7 +225,7 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
break
}
if err != nil {
err = errors.Wrapf(err, "%v.Run() error, %v", client, err)
logger.Errorf("%v.Run() error, %v\n", client, err)
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
@ -265,8 +245,11 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProfile, address string, proposals chan<- *pb.Match) error {
client, baseURL, err := cc.GetHTTP(address)
if err != nil {
err = errors.Wrapf(err, "failed to establish rest client connection to match function: %s", address)
return status.Error(codes.InvalidArgument, err.Error())
logger.WithFields(logrus.Fields{
"error": err.Error(),
"function": address,
}).Error("failed to establish rest client connection to match function")
return status.Error(codes.InvalidArgument, "failed to connect to match function")
}
var m jsonpb.Marshaler
@ -282,7 +265,7 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return status.Errorf(codes.Internal, "failed to get response from mmf run for profile %s: %s", profile.Name, err.Error())
return status.Errorf(codes.Internal, "failed to get response from mmf run for proile %s: %s", profile.Name, err.Error())
}
defer func() {
err = resp.Body.Close()
@ -323,25 +306,16 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
}
func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTicketsRequest) (*pb.ReleaseTicketsResponse, error) {
err := doReleaseTickets(ctx, req.GetTicketIds(), s.store)
err := doReleasetickets(ctx, req, s.store)
if err != nil {
logger.WithError(err).Error("failed to remove the awaiting tickets from the ignore list for requested tickets")
return nil, err
}
stats.Record(ctx, ticketsReleased.M(int64(len(req.TicketIds))))
return &pb.ReleaseTicketsResponse{}, nil
}
func doReleaseTickets(ctx context.Context, ticketIds []string, store statestore.Service) error {
err := store.DeleteTicketsFromPendingRelease(ctx, ticketIds)
if err != nil {
err = errors.Wrap(err, "failed to remove the awaiting tickets from the pending release for requested tickets")
return err
}
stats.Record(ctx, ticketsReleased.M(int64(len(ticketIds))))
return nil
}
func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseAllTicketsRequest) (*pb.ReleaseAllTicketsResponse, error) {
err := s.store.ReleaseAllTickets(ctx)
if err != nil {
@ -354,6 +328,7 @@ func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseA
func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
resp, err := doAssignTickets(ctx, req, s.store)
if err != nil {
logger.WithError(err).Error("failed to update assignments for requested tickets")
return nil, err
}
@ -366,69 +341,13 @@ func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicket
return resp, nil
}
func createOrUpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIds []string, store statestore.Service) error {
if backfill.Id == "" {
backfill.Id = xid.New().String()
backfill.CreateTime = ptypes.TimestampNow()
backfill.Generation = 1
err := store.CreateBackfill(ctx, backfill, ticketIds)
if err != nil {
return err
}
return store.IndexBackfill(ctx, backfill)
}
m := store.NewMutex(backfill.Id)
err := m.Lock(ctx)
if err != nil {
return err
}
defer func() {
_, unlockErr := m.Unlock(ctx)
if unlockErr != nil {
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).WithError(unlockErr).Error("failed to make unlock")
}
}()
b, ids, err := store.GetBackfill(ctx, backfill.Id)
if err != nil {
return err
}
if b.Generation != backfill.Generation {
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).
WithError(errBackfillGenerationMismatch).
Errorf("failed to update backfill, expecting: %d generation but got: %d", b.Generation, backfill.Generation)
return errBackfillGenerationMismatch
}
b.SearchFields = backfill.SearchFields
b.Extensions = backfill.Extensions
b.Generation++
err = store.UpdateBackfill(ctx, b, append(ids, ticketIds...))
if err != nil {
return err
}
return store.IndexBackfill(ctx, b)
}
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
resp, tickets, err := store.UpdateAssignments(ctx, req)
resp, err := store.UpdateAssignments(ctx, req)
if err != nil {
logger.WithError(err).Error("failed to update assignments")
return nil, err
}
for _, ticket := range tickets {
err = recordTimeToAssignment(ctx, ticket)
if err != nil {
logger.WithError(err).Errorf("failed to record time to assignment for ticket %s", ticket.Id)
}
}
ids := []string{}
for _, ag := range req.Assignments {
@ -444,7 +363,7 @@ func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store st
}
}
if err = store.DeleteTicketsFromPendingRelease(ctx, ids); err != nil {
if err = store.DeleteTicketsFromIgnoreList(ctx, ids); err != nil {
logger.WithFields(logrus.Fields{
"ticket_ids": ids,
}).Error(err)
@ -453,18 +372,14 @@ func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store st
return resp, nil
}
func recordTimeToAssignment(ctx context.Context, ticket *pb.Ticket) error {
if ticket.Assignment == nil {
return fmt.Errorf("assignment for ticket %s is nil", ticket.Id)
}
now := time.Now()
created, err := ptypes.Timestamp(ticket.CreateTime)
func doReleasetickets(ctx context.Context, req *pb.ReleaseTicketsRequest, store statestore.Service) error {
err := store.DeleteTicketsFromIgnoreList(ctx, req.GetTicketIds())
if err != nil {
logger.WithFields(logrus.Fields{
"ticket_ids": req.GetTicketIds(),
}).WithError(err).Error("failed to delete the tickets from the ignore list")
return err
}
stats.Record(ctx, ticketsTimeToAssignment.M(now.Sub(created).Milliseconds()))
return nil
}
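The gRPC and HTTP MMF paths above both check ctx.Err() before returning the transport error, since (as the in-code comments note) gRPC tends to suppress the context's error. A minimal, self-contained sketch of that preference; recvFn here is a hypothetical stand-in for a gRPC stream's Recv, not an Open Match API:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// recvAll drains a streaming receive function. If the stream fails while the
// caller's context is already canceled or timed out, the context error is
// returned instead of the less informative transport error.
func recvAll(ctx context.Context, recvFn func() (string, error)) ([]string, error) {
	var out []string
	for {
		msg, err := recvFn()
		if errors.Is(err, io.EOF) {
			return out, nil
		}
		if err != nil {
			// gRPC likes to suppress the context's error, so prefer it here.
			if ctx.Err() != nil {
				return nil, ctx.Err()
			}
			return nil, err
		}
		out = append(out, msg)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate a caller that has already gone away

	_, err := recvAll(ctx, func() (string, error) {
		return "", errors.New("rpc error: code = Unavailable") // simulated stream failure
	})
	fmt.Println(err) // prints "context canceled" rather than the gRPC error
}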

@ -63,7 +63,7 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
// then returns matches which don't collide with previously returned matches.
func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
matches := make([]*matchInp, 0)
nilEvaluationInputs := 0
nilEvlautionInputs := 0
for m := range in {
// Evaluation criteria is optional, but sort it lower than any matches which
@ -82,7 +82,7 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
continue
}
} else {
nilEvaluationInputs++
nilEvlautionInputs++
}
matches = append(matches, &matchInp{
match: m,
@ -90,17 +90,16 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
})
}
if nilEvaluationInputs > 0 {
if nilEvlautionInputs > 0 {
logger.WithFields(logrus.Fields{
"count": nilEvaluationInputs,
"count": nilEvlautionInputs,
}).Info("Some matches don't have the optional field evaluation_input set.")
}
sort.Sort(byScore(matches))
d := decollider{
ticketsUsed: make(map[string]*collidingMatch),
backfillsUsed: make(map[string]*collidingMatch),
ticketsUsed: make(map[string]*collidingMatch),
}
for _, m := range matches {
@ -122,25 +121,11 @@ type collidingMatch struct {
}
type decollider struct {
resultIDs []string
ticketsUsed map[string]*collidingMatch
backfillsUsed map[string]*collidingMatch
resultIDs []string
ticketsUsed map[string]*collidingMatch
}
func (d *decollider) maybeAdd(m *matchInp) {
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
if cm, ok := d.backfillsUsed[m.match.Backfill.Id]; ok {
logger.WithFields(logrus.Fields{
"match_id": m.match.GetMatchId(),
"backfill_id": m.match.Backfill.Id,
"match_score": m.inp.GetScore(),
"colliding_match_id": cm.id,
"colliding_match_score": cm.score,
}).Info("Higher quality match with colliding backfill found. Rejecting match.")
return
}
}
for _, t := range m.match.GetTickets() {
if cm, ok := d.ticketsUsed[t.Id]; ok {
logger.WithFields(logrus.Fields{
@ -154,13 +139,6 @@ func (d *decollider) maybeAdd(m *matchInp) {
}
}
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
d.backfillsUsed[m.match.Backfill.Id] = &collidingMatch{
id: m.match.GetMatchId(),
score: m.inp.GetScore(),
}
}
for _, t := range m.match.GetTickets() {
d.ticketsUsed[t.Id] = &collidingMatch{
id: m.match.GetMatchId(),

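The default evaluator above sorts proposals by score and rejects any match that reuses a ticket already claimed by a higher-scoring match. A simplified, self-contained sketch of that de-collision step, using plain structs instead of the pb.Match proto:

package main

import (
	"fmt"
	"sort"
)

// match is a simplified stand-in for a proposal: an ID, the score from the
// optional evaluation input, and the tickets the match would consume.
type match struct {
	id      string
	score   float64
	tickets []string
}

// decollide keeps the highest-scoring matches whose tickets do not overlap
// with an already-accepted match, mirroring the ticketsUsed map above.
func decollide(matches []match) []string {
	sort.Slice(matches, func(i, j int) bool { return matches[i].score > matches[j].score })

	used := map[string]string{} // ticket ID -> accepted match ID
	accepted := []string{}
outer:
	for _, m := range matches {
		for _, t := range m.tickets {
			if winner, ok := used[t]; ok {
				fmt.Printf("rejecting %s: ticket %s already used by %s\n", m.id, t, winner)
				continue outer
			}
		}
		for _, t := range m.tickets {
			used[t] = m.id
		}
		accepted = append(accepted, m.id)
	}
	return accepted
}

func main() {
	// Prints the rejection for "a", then the accepted IDs: [c b].
	fmt.Println(decollide([]match{
		{id: "a", score: 1, tickets: []string{"1", "2"}},
		{id: "b", score: 10, tickets: []string{"1", "2"}},
		{id: "c", score: 50, tickets: []string{"3"}},
	}))
}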
@ -21,7 +21,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"open-match.dev/open-match/pkg/pb"
)
@ -37,9 +37,6 @@ func TestEvaluate(t *testing.T) {
ticket1 := &pb.Ticket{Id: "1"}
ticket2 := &pb.Ticket{Id: "2"}
ticket3 := &pb.Ticket{Id: "3"}
backfill0 := &pb.Backfill{}
backfill1 := &pb.Backfill{Id: "1"}
backfill2 := &pb.Backfill{Id: "2"}
ticket12Score1 := &pb.Match{
MatchId: "ticket12Score1",
@ -81,61 +78,6 @@ func TestEvaluate(t *testing.T) {
},
}
ticket1Backfill0Score1 := &pb.Match{
MatchId: "ticket1Backfill0Score1",
Tickets: []*pb.Ticket{ticket1},
Backfill: backfill0,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket2Backfill0Score1 := &pb.Match{
MatchId: "ticket2Backfill0Score1",
Tickets: []*pb.Ticket{ticket2},
Backfill: backfill0,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket12Backfill1Score1 := &pb.Match{
MatchId: "ticket12Bacfill1Score1",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill1,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket12Backfill1Score10 := &pb.Match{
MatchId: "ticket12Bacfill1Score1",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill1,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 10,
}),
},
}
ticket12Backfill2Score5 := &pb.Match{
MatchId: "ticket12Backfill2Score5",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill2,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 5,
}),
},
}
tests := []struct {
description string
testMatches []*pb.Match
@ -166,16 +108,6 @@ func TestEvaluate(t *testing.T) {
testMatches: []*pb.Match{ticket12Score1, ticket12Score10, ticket123Score5, ticket3Score50},
wantMatchIDs: []string{ticket12Score10.GetMatchId(), ticket3Score50.GetMatchId()},
},
{
description: "test evaluator ignores backfills with empty id",
testMatches: []*pb.Match{ticket1Backfill0Score1, ticket2Backfill0Score1},
wantMatchIDs: []string{ticket1Backfill0Score1.GetMatchId(), ticket2Backfill0Score1.GetMatchId()},
},
{
description: "test deduplicates matches by backfill and tickets and returns match with higher score",
testMatches: []*pb.Match{ticket12Backfill1Score1, ticket12Backfill1Score10, ticket12Backfill2Score5},
wantMatchIDs: []string{ticket12Backfill1Score10.GetMatchId()},
},
}
for _, test := range tests {
@ -190,17 +122,17 @@ func TestEvaluate(t *testing.T) {
close(in)
err := evaluate(context.Background(), in, out)
require.Nil(t, err)
assert.Nil(t, err)
gotMatchIDs := []string{}
close(out)
for id := range out {
gotMatchIDs = append(gotMatchIDs, id)
}
require.Equal(t, len(test.wantMatchIDs), len(gotMatchIDs))
assert.Equal(t, len(test.wantMatchIDs), len(gotMatchIDs))
for _, mID := range gotMatchIDs {
require.Contains(t, test.wantMatchIDs, mID)
assert.Contains(t, test.wantMatchIDs, mID)
}
})
}

@ -19,12 +19,19 @@ import (
"context"
"io"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"golang.org/x/sync/errgroup"
"open-match.dev/open-match/pkg/pb"
)
var (
logger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "evaluator.harness.golang",
})
)
// Evaluator is the function signature for the Evaluator to be implemented by
// the user. The harness will pass the Matches to evaluate to the Evaluator
// and the Evaluator will return an accepted list of Matches.
@ -88,5 +95,8 @@ func (s *evaluatorService) Evaluate(stream pb.Evaluator_EvaluateServer) error {
})
err := g.Wait()
return errors.Wrap(err, "Error in evaluator.Evaluate")
if err != nil {
logger.WithError(err).Error("Error in evaluator.Evaluate")
}
return err
}
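The harness above hands the user-written Evaluator the proposed matches and expects the accepted ones back. A sketch of a trivial user evaluator follows; the exact Evaluator type lives in the harness package, and the signature below (matches in, accepted matches out) is an assumption for illustration, not the verbatim API:

package evaluator

import (
	"context"

	"open-match.dev/open-match/pkg/pb"
)

// acceptAll is a trivial evaluator: it approves every proposal the harness
// hands it. A real evaluator would resolve ticket collisions here before
// returning the accepted subset.
func acceptAll(ctx context.Context, proposals []*pb.Match) ([]*pb.Match, error) {
	accepted := make([]*pb.Match, 0, len(proposals))
	accepted = append(accepted, proposals...)
	return accepted, nil
}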

@ -25,10 +25,8 @@ import (
)
var (
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerBackfill = stats.Int64("open-match.dev/frontend/total_bytes_per_backfill", "Total bytes per backfill", stats.UnitBytes)
searchFieldsPerBackfill = stats.Int64("open-match.dev/frontend/searchfields_per_backfill", "Searchfields per backfill", stats.UnitDimensionless)
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerTicketView = &view.View{
Measure: totalBytesPerTicket,
@ -42,18 +40,6 @@ var (
Description: "SearchFields per ticket",
Aggregation: telemetry.DefaultCountDistribution,
}
totalBytesPerBackfillView = &view.View{
Measure: totalBytesPerBackfill,
Name: "open-match.dev/frontend/total_bytes_per_backfill",
Description: "Total bytes per backfill",
Aggregation: telemetry.DefaultBytesDistribution,
}
searchFieldsPerBackfillView = &view.View{
Measure: searchFieldsPerBackfill,
Name: "open-match.dev/frontend/searchfields_per_backfill",
Description: "SearchFields per backfill",
Aggregation: telemetry.DefaultCountDistribution,
}
)
// BindService creates the frontend service and binds it to the serving harness.
@ -70,8 +56,6 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
b.RegisterViews(
totalBytesPerTicketView,
searchFieldsPerTicketView,
totalBytesPerBackfillView,
searchFieldsPerBackfillView,
)
return nil
}
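The measure/view pairs above follow the usual OpenCensus pattern: a stats measure records raw values, and a registered view decides how they are aggregated and exported. In isolation (illustrative metric names, plain view.Register instead of the appmain bindings and telemetry distributions) the pattern looks like:

package main

import (
	"context"
	"fmt"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var (
	// The measure describes what is recorded.
	bytesPerTicket = stats.Int64("example.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)

	// The view describes how recorded values are aggregated for export.
	bytesPerTicketView = &view.View{
		Measure:     bytesPerTicket,
		Name:        "example.dev/frontend/total_bytes_per_ticket",
		Description: "Total bytes per ticket",
		Aggregation: view.Distribution(64, 256, 1024, 4096), // bucket boundaries in bytes
	}
)

func main() {
	if err := view.Register(bytesPerTicketView); err != nil {
		panic(err)
	}
	// Record one data point, e.g. the serialized size of a created ticket.
	stats.Record(context.Background(), bytesPerTicket.M(512))
	fmt.Println("recorded")
}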

@ -83,147 +83,25 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
err := store.CreateTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"ticket": ticket,
}).Error("failed to create the ticket")
return nil, err
}
err = store.IndexTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"ticket": ticket,
}).Error("failed to index the ticket")
return nil, err
}
return ticket, nil
}
// CreateBackfill creates a new Backfill object.
// it assigns an unique Id to the input Backfill and record it in state storage.
// Set initial LastAcknowledge time for this Backfill.
// A Backfill is considered as ready for matchmaking once it is created.
// - If SearchFields exist in a Backfill, CreateBackfill will also index these fields such that one can query the ticket with query.QueryBackfills function.
func (s *frontendService) CreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest) (*pb.Backfill, error) {
// Perform input validation.
if req == nil {
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
}
if req.Backfill == nil {
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
}
if req.Backfill.CreateTime != nil {
return nil, status.Errorf(codes.InvalidArgument, "backfills cannot be created with create time set")
}
return doCreateBackfill(ctx, req, s.store)
}
func doCreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest, store statestore.Service) (*pb.Backfill, error) {
// Generate an id and create a Backfill in state storage
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
if !ok {
return nil, status.Error(codes.Internal, "failed to clone input ticket proto")
}
backfill.Id = xid.New().String()
backfill.CreateTime = ptypes.TimestampNow()
backfill.Generation = 1
sfCount := 0
sfCount += len(backfill.GetSearchFields().GetDoubleArgs())
sfCount += len(backfill.GetSearchFields().GetStringArgs())
sfCount += len(backfill.GetSearchFields().GetTags())
stats.Record(ctx, searchFieldsPerBackfill.M(int64(sfCount)))
stats.Record(ctx, totalBytesPerBackfill.M(int64(proto.Size(backfill))))
err := store.CreateBackfill(ctx, backfill, []string{})
if err != nil {
return nil, err
}
err = store.IndexBackfill(ctx, backfill)
if err != nil {
return nil, err
}
return backfill, nil
}
// UpdateBackfill updates a Backfill object, if present.
// Update would increment generation in Redis.
// Only Extensions and SearchFields would be updated.
// CreateTime is not changed on Update
func (s *frontendService) UpdateBackfill(ctx context.Context, req *pb.UpdateBackfillRequest) (*pb.Backfill, error) {
if req == nil {
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
}
if req.Backfill == nil {
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
}
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
if !ok {
return nil, status.Error(codes.Internal, "failed to clone input backfill proto")
}
bfID := backfill.Id
if bfID == "" {
return nil, status.Error(codes.InvalidArgument, "backfill ID should exist")
}
m := s.store.NewMutex(bfID)
err := m.Lock(ctx)
if err != nil {
return nil, err
}
defer func() {
if _, err = m.Unlock(ctx); err != nil {
logger.WithError(err).Error("error on mutex unlock")
}
}()
bfStored, associatedTickets, err := s.store.GetBackfill(ctx, bfID)
if err != nil {
return nil, err
}
// Update generation here, because Frontend is used by GameServer only
bfStored.SearchFields = backfill.SearchFields
bfStored.Extensions = backfill.Extensions
// Autoincrement generation, input backfill generation validation is performed
// on Backend only (after MMF round)
bfStored.Generation++
err = s.store.UpdateBackfill(ctx, bfStored, []string{})
if err != nil {
return nil, err
}
err = s.store.DeleteTicketsFromPendingRelease(ctx, associatedTickets)
if err != nil {
return nil, err
}
err = s.store.IndexBackfill(ctx, bfStored)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": bfStored.Id,
}).Error("failed to index the backfill")
return nil, err
}
return bfStored, nil
}
// DeleteBackfill deletes a Backfill by its ID.
func (s *frontendService) DeleteBackfill(ctx context.Context, req *pb.DeleteBackfillRequest) (*empty.Empty, error) {
bfID := req.GetBackfillId()
if bfID == "" {
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
}
err := s.store.DeleteBackfillCompletely(ctx, bfID)
// Deleting of Backfill is inevitable when it is expired, so we don't worry about error here
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
}).Error("error on DeleteBackfill")
}
return &empty.Empty{}, nil
}
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
@ -240,6 +118,10 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
// Deindex this Ticket to remove it from matchmaking pool.
err := store.DeindexTicket(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to deindex the ticket")
return err
}
@ -255,12 +137,12 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
"id": id,
}).Error("failed to delete the ticket")
}
err = store.DeleteTicketsFromPendingRelease(ctx, []string{id})
err = store.DeleteTicketsFromIgnoreList(ctx, []string{id})
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to delete the ticket from pendingRelease")
}).Error("failed to delete the ticket from ignorelist")
}
// TODO: If other redis queues are implemented or we have custom index fields
// created by Open Match, those need to be cleaned up here.
@ -270,7 +152,20 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
// GetTicket get the Ticket associated with the specified TicketId.
func (s *frontendService) GetTicket(ctx context.Context, req *pb.GetTicketRequest) (*pb.Ticket, error) {
return s.store.GetTicket(ctx, req.GetTicketId())
return doGetTickets(ctx, req.GetTicketId(), s.store)
}
func doGetTickets(ctx context.Context, id string, store statestore.Service) (*pb.Ticket, error) {
ticket, err := store.GetTicket(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to get the ticket")
return nil, err
}
return ticket, nil
}
// WatchAssignments stream back Assignment of the specified TicketId if it is updated.
@ -294,10 +189,6 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
var currAssignment *pb.Assignment
var ok bool
callback := func(assignment *pb.Assignment) error {
if ctx.Err() != nil {
return status.Errorf(codes.Aborted, ctx.Err().Error())
}
if (currAssignment == nil && assignment != nil) || !proto.Equal(currAssignment, assignment) {
currAssignment, ok = proto.Clone(assignment).(*pb.Assignment)
if !ok {
@ -306,6 +197,7 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
err := sender(currAssignment)
if err != nil {
logger.WithError(err).Error("failed to send Redis response to grpc server")
return status.Errorf(codes.Aborted, err.Error())
}
}
@ -314,78 +206,3 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
return store.GetAssignments(ctx, id, callback)
}
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info.
// This triggers an assignment process.
func (s *frontendService) AcknowledgeBackfill(ctx context.Context, req *pb.AcknowledgeBackfillRequest) (*pb.AcknowledgeBackfillResponse, error) {
if req.GetBackfillId() == "" {
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
}
if req.GetAssignment() == nil {
return nil, status.Errorf(codes.InvalidArgument, ".Assignment is required")
}
m := s.store.NewMutex(req.GetBackfillId())
err := m.Lock(ctx)
if err != nil {
return nil, err
}
defer func() {
if _, err = m.Unlock(ctx); err != nil {
logger.WithError(err).Error("error on mutex unlock")
}
}()
bf, associatedTickets, err := s.store.GetBackfill(ctx, req.GetBackfillId())
if err != nil {
return nil, err
}
err = s.store.UpdateAcknowledgmentTimestamp(ctx, req.GetBackfillId())
if err != nil {
return nil, err
}
resp := &pb.AcknowledgeBackfillResponse{
Backfill: bf,
Tickets: make([]*pb.Ticket, 0),
}
if len(associatedTickets) != 0 {
setResp, tickets, err := s.store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{{TicketIds: associatedTickets, Assignment: req.GetAssignment()}},
})
if err != nil {
return nil, err
}
resp.Tickets = tickets
// log errors returned from UpdateAssignments to track tickets with NotFound errors
for _, f := range setResp.Failures {
logger.Errorf("failed to assign ticket %s, cause %d", f.TicketId, f.Cause)
}
for _, id := range associatedTickets {
err = s.store.DeindexTicket(ctx, id)
// Try to deindex all input tickets. Log without returning an error if the deindexing operation failed.
if err != nil {
logger.WithError(err).Errorf("failed to deindex ticket %s after updating the assignments", id)
}
}
// Remove all tickets associated with backfill, because unassigned tickets are not found only
err = s.store.UpdateBackfill(ctx, bf, []string{})
if err != nil {
return nil, err
}
}
return resp, nil
}
// GetBackfill fetches a Backfill object by its ID.
func (s *frontendService) GetBackfill(ctx context.Context, req *pb.GetBackfillRequest) (*pb.Backfill, error) {
bf, _, err := s.store.GetBackfill(ctx, req.GetBackfillId())
return bf, err
}
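doWatchAssignments above only streams an assignment back when it differs from the last one sent. A simplified sketch of that change-detection callback, using a plain struct in place of the Assignment proto and proto.Equal:

package main

import "fmt"

type assignment struct{ connection string }

// watcher invokes send only when the assignment actually changes, which is
// what the doWatchAssignments callback does before writing to the stream.
type watcher struct {
	last *assignment
	send func(assignment) error
}

func (w *watcher) observe(a *assignment) error {
	if a == nil {
		return nil
	}
	if w.last == nil || *w.last != *a {
		copied := *a
		w.last = &copied
		return w.send(copied)
	}
	return nil // unchanged, nothing to stream
}

func main() {
	w := &watcher{send: func(a assignment) error {
		fmt.Println("sending", a.connection)
		return nil
	}}
	_ = w.observe(&assignment{connection: "10.0.0.1"})
	_ = w.observe(&assignment{connection: "10.0.0.1"}) // duplicate, suppressed
	_ = w.observe(&assignment{connection: "10.0.0.2"})
}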

@ -22,9 +22,8 @@ import (
"testing"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/statestore"
@ -78,194 +77,17 @@ func TestDoCreateTickets(t *testing.T) {
test.preAction(cancel)
res, err := doCreateTicket(ctx, &pb.CreateTicketRequest{Ticket: test.ticket}, store)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetId())
require.True(t, matched)
require.NoError(t, err)
require.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
assert.True(t, matched)
assert.Nil(t, err)
assert.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
}
})
}
}
func TestCreateBackfill(t *testing.T) {
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
ctx := utilTesting.NewContext(t)
fs := frontendService{cfg, store}
var testCases = []struct {
description string
request *pb.CreateBackfillRequest
result *pb.Backfill
expectedCode codes.Code
expectedMessage string
}{
{
description: "nil request check",
request: nil,
expectedCode: codes.InvalidArgument,
expectedMessage: "request is nil",
},
{
description: "nil backfill - error is returned",
request: &pb.CreateBackfillRequest{Backfill: nil},
expectedCode: codes.InvalidArgument,
expectedMessage: ".backfill is required",
},
{
description: "createTime should not exist in input",
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{CreateTime: ptypes.TimestampNow()}},
expectedCode: codes.InvalidArgument,
expectedMessage: "backfills cannot be created with create time set",
},
{
description: "empty Backfill, no errors",
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{}},
expectedCode: codes.OK,
expectedMessage: "",
},
{
description: "normal backfill",
request: &pb.CreateBackfillRequest{
Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
}}}},
expectedCode: codes.OK,
expectedMessage: "",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
res, err := fs.CreateBackfill(ctx, tc.request)
if tc.expectedCode == codes.OK {
require.NoError(t, err)
require.NotNil(t, res)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
}
})
}
// expect error with canceled context
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fs = frontendService{cfg, store}
ctx, cancel := context.WithCancel(context.Background())
cancel()
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}})
require.NotNil(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Nil(t, res)
}
func TestUpdateBackfill(t *testing.T) {
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
ctx := utilTesting.NewContext(t)
fs := frontendService{cfg, store}
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{
Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
},
},
},
})
require.NoError(t, err)
require.NotNil(t, res)
var testCases = []struct {
description string
request *pb.UpdateBackfillRequest
result *pb.Backfill
expectedCode codes.Code
expectedMessage string
}{
{
description: "nil request check",
request: nil,
expectedCode: codes.InvalidArgument,
expectedMessage: "request is nil",
},
{
description: "nil backfill - error is returned",
request: &pb.UpdateBackfillRequest{Backfill: nil},
expectedCode: codes.InvalidArgument,
expectedMessage: ".backfill is required",
},
{
description: "empty Backfill, error with no backfill ID",
request: &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{}},
expectedCode: codes.InvalidArgument,
expectedMessage: "backfill ID should exist",
},
{
description: "normal backfill",
request: &pb.UpdateBackfillRequest{
Backfill: &pb.Backfill{
Id: res.Id,
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
}}}},
expectedCode: codes.OK,
expectedMessage: "",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
res, err = fs.UpdateBackfill(ctx, tc.request)
if tc.expectedCode == codes.OK {
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, tc.request.Backfill.SearchFields.DoubleArgs, res.SearchFields.DoubleArgs)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
}
})
}
// expect error with canceled context
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
fs = frontendService{cfg, store}
defer closer()
ctx, cancel := context.WithCancel(context.Background())
cancel()
res, err = fs.UpdateBackfill(ctx, &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{
Id: res.Id,
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}})
require.NotNil(t, err)
require.Equal(t, codes.Unknown.String(), status.Convert(err).Code().String())
require.Nil(t, res)
}
func TestDoWatchAssignments(t *testing.T) {
testTicket := &pb.Ticket{
Id: "test-id",
@ -296,12 +118,12 @@ func TestDoWatchAssignments(t *testing.T) {
{
description: "expect two assignment reads from preAction writes and fail in grpc aborted code",
preAction: func(ctx context.Context, t *testing.T, store statestore.Service, wantAssignments []*pb.Assignment, wg *sync.WaitGroup) {
require.Nil(t, store.CreateTicket(ctx, testTicket))
assert.Nil(t, store.CreateTicket(ctx, testTicket))
go func(wg *sync.WaitGroup) {
for i := 0; i < len(wantAssignments); i++ {
time.Sleep(50 * time.Millisecond)
_, _, err := store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
_, err := store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{testTicket.GetId()},
@ -309,7 +131,7 @@ func TestDoWatchAssignments(t *testing.T) {
},
},
})
require.NoError(t, err)
assert.Nil(t, err)
wg.Done()
}
}(wg)
@ -333,99 +155,16 @@ func TestDoWatchAssignments(t *testing.T) {
test.preAction(ctx, t, store, test.wantAssignments, &wg)
err := doWatchAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
assert.Equal(t, test.wantCode, status.Convert(err).Code())
wg.Wait()
for i := 0; i < len(gotAssignments); i++ {
require.Equal(t, gotAssignments[i], test.wantAssignments[i])
assert.Equal(t, gotAssignments[i], test.wantAssignments[i])
}
})
}
}
// TestAcknowledgeBackfillValidation - test input validation only
func TestAcknowledgeBackfillValidation(t *testing.T) {
cfg := viper.New()
tests := []struct {
description string
request *pb.AcknowledgeBackfillRequest
expectedMessage string
}{
{
description: "no BackfillId, error is expected",
request: &pb.AcknowledgeBackfillRequest{BackfillId: "", Assignment: &pb.Assignment{Connection: "10.0.0.1"}},
expectedMessage: ".BackfillId is required",
},
{
description: "no Assignment, error is expected",
request: &pb.AcknowledgeBackfillRequest{BackfillId: "1234", Assignment: nil},
expectedMessage: ".Assignment is required",
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fs := frontendService{cfg, store}
bf, err := fs.AcknowledgeBackfill(ctx, test.request)
require.Equal(t, codes.InvalidArgument.String(), status.Convert(err).Code().String())
require.Equal(t, test.expectedMessage, status.Convert(err).Message())
require.Nil(t, bf)
})
}
}
// TestAcknowledgeBackfill verifies timestamp part of AcknowledgeBackfill call,
// assignment part tested in a corresponding E2E test.
// Expired backfill can not be acknowledged
func TestAcknowledgeBackfill(t *testing.T) {
cfg := viper.New()
ctx := context.Background()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
err := store.CreateBackfill(ctx, fakeBackfill, []string{})
require.NoError(t, err)
fs := frontendService{cfg, store}
resp, err := fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.NoError(t, err)
require.NotNil(t, resp)
require.NotNil(t, resp.Backfill)
require.NotNil(t, resp.Tickets)
// Use wrong BackfillID, error is returned
resp, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: "42", Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.Error(t, err)
require.Nil(t, resp)
require.Equal(t, "Backfill id: 42 not found", status.Convert(err).Message())
time.Sleep(cfg.GetDuration("pendingReleaseTimeout"))
ids, err := store.GetExpiredBackfillIDs(ctx)
require.NoError(t, err)
require.Len(t, ids, 1)
resp, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.Nil(t, resp)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "can not acknowledge an expired backfill, id: 1")
}
func TestDoDeleteTicket(t *testing.T) {
fakeTicket := &pb.Ticket{
Id: "1",
@ -472,7 +211,7 @@ func TestDoDeleteTicket(t *testing.T) {
test.preAction(ctx, cancel, store)
err := doDeleteTicket(ctx, fakeTicket.GetId(), store)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
assert.Equal(t, test.wantCode, status.Convert(err).Code())
})
}
}
@ -525,119 +264,13 @@ func TestDoGetTicket(t *testing.T) {
test.preAction(ctx, cancel, store)
ticket, err := store.GetTicket(ctx, fakeTicket.GetId())
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
ticket, err := doGetTickets(ctx, fakeTicket.GetId(), store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
require.Equal(t, test.wantTicket.GetId(), ticket.GetId())
require.Equal(t, test.wantTicket.SearchFields.DoubleArgs, ticket.SearchFields.DoubleArgs)
assert.Equal(t, test.wantTicket.GetId(), ticket.GetId())
assert.Equal(t, test.wantTicket.SearchFields.DoubleArgs, ticket.SearchFields.DoubleArgs)
}
})
}
}
func TestGetBackfill(t *testing.T) {
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
cfg := viper.New()
tests := []struct {
description string
preAction func(context.Context, context.CancelFunc, statestore.Service)
wantTicket *pb.Backfill
wantCode codes.Code
}{
{
description: "expect unavailable code since context is canceled before being called",
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
cancel()
},
wantCode: codes.Unavailable,
},
{
description: "expect not found code since ticket does not exist",
preAction: func(_ context.Context, _ context.CancelFunc, _ statestore.Service) {},
wantCode: codes.NotFound,
},
{
description: "expect ok code with output ticket equivalent to fakeBackfill",
preAction: func(ctx context.Context, _ context.CancelFunc, store statestore.Service) {
store.CreateBackfill(ctx, fakeBackfill, []string{})
},
wantCode: codes.OK,
wantTicket: fakeBackfill,
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
defer closer()
fs := frontendService{cfg, store}
test.preAction(ctx, cancel, store)
backfill, err := fs.GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: fakeBackfill.GetId()})
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
if err == nil {
require.Equal(t, test.wantTicket.GetId(), backfill.GetId())
require.Equal(t, test.wantTicket.SearchFields.DoubleArgs, backfill.SearchFields.DoubleArgs)
}
})
}
}
func TestDoDeleteBackfill(t *testing.T) {
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
defer closer()
ctx := context.Background()
err := store.CreateBackfill(ctx, fakeBackfill, []string{})
require.NoError(t, err)
cfg := viper.New()
fs := frontendService{cfg, store}
tests := []struct {
description string
id string
wantCode codes.Code
}{
{
description: "expect ok code since delete backfill does not care about if backfill exists or not",
id: "222",
wantCode: codes.OK,
},
{
description: "expect ok code",
id: "1",
wantCode: codes.OK,
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
_, err := fs.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: fakeBackfill.GetId()})
require.NoError(t, err)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
})
}
}

@ -1,250 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
// cache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type cache struct {
store statestore.Service
requests chan *cacheRequest
// Single item buffered channel. Holds a value when runQuery can be safely
// started. Basically a channel/select friendly mutex around runQuery
// running.
startRunRequest chan struct{}
wg sync.WaitGroup
// Multithreaded unsafe fields, only to be written by update, and read when
// request given the ok.
value interface{}
update func(statestore.Service, interface{}) error
err error
}
type cacheRequest struct {
ctx context.Context
runNow chan struct{}
}
func (c *cache) request(ctx context.Context, f func(interface{})) error {
cr := &cacheRequest{
ctx: ctx,
runNow: make(chan struct{}),
}
sendRequest:
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "cache request canceled before request sent.")
case <-c.startRunRequest:
go c.runRequest()
case c.requests <- cr:
break sendRequest
}
}
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "cache request canceled waiting for access.")
case <-cr.runNow:
defer c.wg.Done()
}
if c.err != nil {
return c.err
}
f(c.value)
return nil
}
func (c *cache) runRequest() {
defer func() {
c.startRunRequest <- struct{}{}
}()
// Wait for first query request.
reqs := []*cacheRequest{<-c.requests}
// Collect all waiting queries.
collectAllWaiting:
for {
select {
case req := <-c.requests:
reqs = append(reqs, req)
default:
break collectAllWaiting
}
}
c.err = c.update(c.store, c.value)
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
// Send WaitGroup to query calls, letting them run their query on the cache.
for _, req := range reqs {
c.wg.Add(1)
select {
case req.runNow <- struct{}{}:
case <-req.ctx.Done():
c.wg.Done()
}
}
// wait for requests to finish using cache.
c.wg.Wait()
}
func newTicketCache(b *appmain.Bindings, store statestore.Service) *cache {
c := &cache{
store: store,
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
value: make(map[string]*pb.Ticket),
update: updateTicketCache,
}
c.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(c.store.HealthCheck)
return c
}
func updateTicketCache(store statestore.Service, value interface{}) error {
if value == nil {
return status.Error(codes.InvalidArgument, "value is required")
}
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Ticket, but got: %T", value)
}
t := time.Now()
previousCount := len(tickets)
currentAll, err := store.GetIndexedIDSet(context.Background())
if err != nil {
return err
}
deletedCount := 0
for id := range tickets {
if _, ok := currentAll[id]; !ok {
delete(tickets, id)
deletedCount++
}
}
toFetch := []string{}
for id := range currentAll {
if _, ok := tickets[id]; !ok {
toFetch = append(toFetch, id)
}
}
newTickets, err := store.GetTickets(context.Background(), toFetch)
if err != nil {
return err
}
for _, t := range newTickets {
tickets[t.Id] = t
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tickets))
return nil
}
func newBackfillCache(b *appmain.Bindings, store statestore.Service) *cache {
c := &cache{
store: store,
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
value: make(map[string]*pb.Backfill),
update: updateBackfillCache,
}
c.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(c.store.HealthCheck)
return c
}
func updateBackfillCache(store statestore.Service, value interface{}) error {
if value == nil {
return status.Error(codes.InvalidArgument, "value is required")
}
backfills, ok := value.(map[string]*pb.Backfill)
if !ok {
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Backfill, but got: %T", value)
}
t := time.Now()
previousCount := len(backfills)
index, err := store.GetIndexedBackfills(context.Background())
if err != nil {
return err
}
deletedCount := 0
for id, backfill := range backfills {
generation, ok := index[id]
if !ok || backfill.Generation < int64(generation) {
delete(backfills, id)
deletedCount++
}
}
toFetch := []string{}
for id := range index {
if _, ok := backfills[id]; !ok {
toFetch = append(toFetch, id)
}
}
fetchedBackfills, err := store.GetBackfills(context.Background(), toFetch)
if err != nil {
return err
}
for _, b := range fetchedBackfills {
backfills[b.Id] = b
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
logger.Debugf("Backfill Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(backfills))
return nil
}
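The cache above uses a one-element buffered channel as a select-friendly mutex: whichever request drains the token runs the update, and every request queued in the meantime shares that single update. A stripped-down, runnable sketch of the same coalescing idea, without the statestore, metrics, or per-request contexts:

package main

import (
	"fmt"
	"sync"
)

// coalescer batches concurrent get() calls into a single shared update,
// the same shape as startRunRequest / requests in the cache above.
type coalescer struct {
	token    chan struct{} // one-element buffer acting as a mutex
	requests chan chan int
	value    int
}

func newCoalescer() *coalescer {
	c := &coalescer{
		token:    make(chan struct{}, 1),
		requests: make(chan chan int),
	}
	c.token <- struct{}{}
	return c
}

func (c *coalescer) get() int {
	reply := make(chan int)
	for {
		select {
		case <-c.token:
			go c.run() // we won the token; start a batch update
		case c.requests <- reply:
			return <-reply
		}
	}
}

func (c *coalescer) run() {
	defer func() { c.token <- struct{}{} }()
	waiting := []chan int{<-c.requests} // wait for the first request
collect:
	for { // then collect everything else already queued
		select {
		case r := <-c.requests:
			waiting = append(waiting, r)
		default:
			break collect
		}
	}
	c.value++ // the single shared "cache update"
	for _, r := range waiting {
		r <- c.value
	}
}

func main() {
	c := newCoalescer()
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); fmt.Println(c.get()) }()
	}
	wg.Wait()
}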

@ -19,15 +19,13 @@ import (
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
backfillsPerQuery = stats.Int64("open-match.dev/query/backfills_per_query", "Number of backfills per query", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of items query service cached", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of tickets query service cached", stats.UnitDimensionless)
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
@ -38,16 +36,10 @@ var (
Description: "Tickets per query",
Aggregation: telemetry.DefaultCountDistribution,
}
backfillsPerQueryView = &view.View{
Measure: ticketsPerQuery,
Name: "open-match.dev/query/backfills_per_query",
Description: "Backfills per query",
Aggregation: telemetry.DefaultCountDistribution,
}
cacheTotalItemsView = &view.View{
Measure: cacheTotalItems,
Name: "open-match.dev/query/total_cached_items",
Description: "Total number of cached items",
Description: "Total number of cached tickets",
Aggregation: view.LastValue(),
}
cacheFetchedItemsView = &view.View{
@ -78,11 +70,9 @@ var (
// BindService creates the query service and binds it to the serving harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
store := statestore.New(p.Config())
service := &queryService{
cfg: p.Config(),
tc: newTicketCache(b, store),
bc: newBackfillCache(b, store),
tc: newTicketCache(b, p.Config()),
}
b.AddHandleFunc(func(s *grpc.Server) {
@ -90,7 +80,6 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
}, pb.RegisterQueryServiceHandlerFromEndpoint)
b.RegisterViews(
ticketsPerQueryView,
backfillsPerQueryView,
cacheTotalItemsView,
cacheUpdateView,
cacheFetchedItemsView,

@ -15,14 +15,20 @@
package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/filter"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
@ -34,11 +40,10 @@ var (
)
// queryService API provides utility functions for common MMF functionality such
// as retrieving Tickets from state storage.
// as retreiving Tickets from state storage.
type queryService struct {
cfg config.View
tc *cache
bc *cache
tc *ticketCache
}
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
@ -54,13 +59,7 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
}
var results []*pb.Ticket
err = s.tc.request(ctx, func(value interface{}) {
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
return
}
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
for _, ticket := range tickets {
if pf.In(ticket) {
results = append(results, ticket)
@ -68,7 +67,7 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
}
})
if err != nil {
err = errors.Wrap(err, "QueryTickets: failed to run request")
logger.WithError(err).Error("Failed to run request.")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
@ -104,13 +103,7 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
}
var results []string
err = s.tc.request(ctx, func(value interface{}) {
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
return
}
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
for id, ticket := range tickets {
if pf.In(ticket) {
results = append(results, id)
@ -118,7 +111,7 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
}
})
if err != nil {
err = errors.Wrap(err, "QueryTicketIds: failed to run request")
logger.WithError(err).Error("Failed to run request.")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
@ -141,56 +134,6 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
return nil
}
func (s *queryService) QueryBackfills(req *pb.QueryBackfillsRequest, responseServer pb.QueryService_QueryBackfillsServer) error {
ctx := responseServer.Context()
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
}
pf, err := filter.NewPoolFilter(pool)
if err != nil {
return err
}
var results []*pb.Backfill
err = s.bc.request(ctx, func(value interface{}) {
backfills, ok := value.(map[string]*pb.Backfill)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Backfill, but got: %T", value)
return
}
for _, backfill := range backfills {
if pf.In(backfill) {
results = append(results, backfill)
}
}
})
if err != nil {
err = errors.Wrap(err, "QueryBackfills: failed to run request")
return err
}
stats.Record(ctx, backfillsPerQuery.M(int64(len(results))))
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
end := start + pSize
if end > len(results) {
end = len(results)
}
err := responseServer.Send(&pb.QueryBackfillsResponse{
Backfills: results[start:end],
})
if err != nil {
return err
}
}
return nil
}
func getPageSize(cfg config.View) int {
const (
name = "queryPageSize"
@ -222,3 +165,159 @@ func getPageSize(cfg config.View) int {
return pSize
}
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
// ticketCache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type ticketCache struct {
store statestore.Service
requests chan *cacheRequest
// Single item buffered channel. Holds a value when runQuery can be safely
// started. Basically a channel/select friendly mutex around runQuery
// running.
startRunRequest chan struct{}
wg sync.WaitGroup
// Mutlithreaded unsafe fields, only to be written by update, and read when
// request given the ok.
tickets map[string]*pb.Ticket
err error
}
func newTicketCache(b *appmain.Bindings, cfg config.View) *ticketCache {
tc := &ticketCache{
store: statestore.New(cfg),
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
tickets: make(map[string]*pb.Ticket),
}
tc.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(tc.store.HealthCheck)
return tc
}
type cacheRequest struct {
ctx context.Context
runNow chan struct{}
}
func (tc *ticketCache) request(ctx context.Context, f func(map[string]*pb.Ticket)) error {
cr := &cacheRequest{
ctx: ctx,
runNow: make(chan struct{}),
}
sendRequest:
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled before reuest sent.")
case <-tc.startRunRequest:
go tc.runRequest()
case tc.requests <- cr:
break sendRequest
}
}
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled waiting for access.")
case <-cr.runNow:
defer tc.wg.Done()
}
if tc.err != nil {
return tc.err
}
f(tc.tickets)
return nil
}
func (tc *ticketCache) runRequest() {
defer func() {
tc.startRunRequest <- struct{}{}
}()
// Wait for first query request.
reqs := []*cacheRequest{<-tc.requests}
// Collect all waiting queries.
collectAllWaiting:
for {
select {
case req := <-tc.requests:
reqs = append(reqs, req)
default:
break collectAllWaiting
}
}
tc.update()
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
// Send WaitGroup to query calls, letting them run their query on the ticket
// cache.
for _, req := range reqs {
tc.wg.Add(1)
select {
case req.runNow <- struct{}{}:
case <-req.ctx.Done():
tc.wg.Done()
}
}
// wait for requests to finish using ticket cache.
tc.wg.Wait()
}
func (tc *ticketCache) update() {
st := time.Now()
previousCount := len(tc.tickets)
currentAll, err := tc.store.GetIndexedIDSet(context.Background())
if err != nil {
tc.err = err
return
}
deletedCount := 0
for id := range tc.tickets {
if _, ok := currentAll[id]; !ok {
delete(tc.tickets, id)
deletedCount++
}
}
toFetch := []string{}
for id := range currentAll {
if _, ok := tc.tickets[id]; !ok {
toFetch = append(toFetch, id)
}
}
newTickets, err := tc.store.GetTickets(context.Background(), toFetch)
if err != nil {
tc.err = err
return
}
for _, t := range newTickets {
tc.tickets[t.Id] = t
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(st))/float64(time.Millisecond)))
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
tc.err = nil
}
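QueryTickets and QueryTicketIds above filter the cached tickets and then stream the results back in pages of getPageSize. The paging itself is plain slice chunking, shown here with hypothetical string IDs:

package main

import "fmt"

// paginate splits results into pages of at most pageSize, the same chunking
// the query handlers use before each responseServer.Send call.
func paginate(results []string, pageSize int) [][]string {
	var pages [][]string
	for start := 0; start < len(results); start += pageSize {
		end := start + pageSize
		if end > len(results) {
			end = len(results)
		}
		pages = append(pages, results[start:end])
	}
	return pages
}

func main() {
	fmt.Println(paginate([]string{"t1", "t2", "t3", "t4", "t5"}, 2))
	// [[t1 t2] [t3 t4] [t5]]
}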

@ -18,7 +18,6 @@ import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"open-match.dev/open-match/internal/config"
)
@ -62,7 +61,9 @@ func TestGetPageSize(t *testing.T) {
cfg := viper.New()
tt.configure(cfg)
actual := getPageSize(cfg)
require.Equal(t, tt.expected, actual)
if actual != tt.expected {
t.Errorf("got %d, want %d", actual, tt.expected)
}
})
}
}

@ -43,7 +43,7 @@ var (
// Streams from multiple GRPC calls of matches are combined on a single channel.
// These matches are sent to the evaluator, then the tickets are added to the
// pending release list. Finally the matches are returned to the calling stream.
// ignore list. Finally the matches are returned to the calling stream.
// receive from backend | Synchronize
// -> m1c ->
@ -51,11 +51,11 @@ var (
// -> m2c ->
// remember return channel m7c for match | fanInFanOut
// -> m3c ->
// set mappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
// setmappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
// -> m4c -> (buffered)
// send to evaluator | wrapEvaluator
// -> m5c -> (buffered)
// add tickets to pending release | addMatchesToPendingRelease
// add tickets to ignore list | addMatchesToIgnoreList
// -> m6c ->
// fan out to origin synchronize call | fanInFanOut
// -> (Synchronize call specific ) m7c -> (buffered)
@ -113,7 +113,7 @@ func (s *synchronizerService) Synchronize(stream ipb.Synchronizer_SynchronizeSer
registration.allM1cSent.Done()
return
}
registration.m1c.send(mAndM7c{m: req.Proposal, m7c: registration.m7c})
registration.m1c.send(mAndM6c{m: req.Proposal, m7c: registration.m7c})
}
}()
@ -212,7 +212,7 @@ func (s *synchronizerService) runCycle() {
/////////////////////////////////////// Initialize cycle
ctx, cancel := contextcause.WithCancelCause(context.Background())
m2c := make(chan mAndM7c)
m2c := make(chan mAndM6c)
m3c := make(chan *pb.Match)
m4c := make(chan *pb.Match)
m5c := make(chan string)
@ -240,8 +240,8 @@ func (s *synchronizerService) runCycle() {
go s.cacheMatchIDToTicketIDs(matchTickets, m3c, m4c)
go s.wrapEvaluator(ctx, cancel, bufferMatchChannel(m4c), m5c)
go func() {
s.addMatchesToPendingRelease(ctx, matchTickets, cancel, bufferStringChannel(m5c), m6c)
// Wait for pending release, but not all matches returned, the next cycle
s.addMatchesToIgnoreList(ctx, matchTickets, cancel, bufferStringChannel(m5c), m6c)
// Wait for ignore list, but not all matches returned, the next cycle
// can start now.
close(closedOnCycleEnd)
}()
@ -289,24 +289,17 @@ Registration:
r.cancelMmfs <- struct{}{}
}
})
<-closedOnCycleEnd
stats.Record(ctx, iterationLatency.M(float64(time.Since(cst)/time.Millisecond)))
// Clean up in case it was never needed.
cancelProposalCollection.Stop()
err := s.store.CleanupBackfills(ctx)
if err != nil {
logger.Errorf("Failed to clean up backfills, %s", err.Error())
}
}
///////////////////////////////////////
///////////////////////////////////////
type mAndM7c struct {
type mAndM6c struct {
m *pb.Match
m7c chan string
}
@ -316,10 +309,10 @@ type mAndM7c struct {
// This channel is remembered in a map, and the match is passed to be evaluated.
// When a match returns from evaluation, it's ID is looked up in the map and the
// match is returned on that channel.
func fanInFanOut(m2c <-chan mAndM7c, m3c chan<- *pb.Match, m6c <-chan string) {
m7cMap := make(map[string]chan<- string)
func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
m6cMap := make(map[string]chan<- string)
defer func(m2c <-chan mAndM7c) {
defer func(m2c <-chan mAndM6c) {
for range m2c {
}
}(m2c)
@ -328,7 +321,7 @@ func fanInFanOut(m2c <-chan mAndM7c, m3c chan<- *pb.Match, m6c <-chan string) {
select {
case m2, ok := <-m2c:
if ok {
m7cMap[m2.m.GetMatchId()] = m2.m7c
m6cMap[m2.m.GetMatchId()] = m2.m7c
m3c <- m2.m
} else {
close(m3c)
@ -341,7 +334,7 @@ func fanInFanOut(m2c <-chan mAndM7c, m3c chan<- *pb.Match, m6c <-chan string) {
return
}
m7c, ok := m7cMap[m5]
m7c, ok := m6cMap[m5]
if ok {
m7c <- m5
} else {
@ -357,8 +350,8 @@ func fanInFanOut(m2c <-chan mAndM7c, m3c chan<- *pb.Match, m6c <-chan string) {
///////////////////////////////////////
type cutoffSender struct {
m1c chan<- mAndM7c
m2c chan<- mAndM7c
m1c chan<- mAndM6c
m2c chan<- mAndM6c
closed chan struct{}
closeOnce sync.Once
}
@ -366,8 +359,8 @@ type cutoffSender struct {
// cutoffSender allows values to be passed on the provided channel until cutoff
// has been called. This closed the provided channel. Calls to send after
// cutoff work, but values are ignored.
func newCutoffSender(m2c chan<- mAndM7c) *cutoffSender {
m1c := make(chan mAndM7c)
func newCutoffSender(m2c chan<- mAndM6c) *cutoffSender {
m1c := make(chan mAndM6c)
c := &cutoffSender{
m1c: m1c,
m2c: m2c,
@ -390,7 +383,7 @@ func newCutoffSender(m2c chan<- mAndM7c) *cutoffSender {
}
// send passes the value on the channel if still open, otherwise does nothing.
func (c *cutoffSender) send(match mAndM7c) {
func (c *cutoffSender) send(match mAndM6c) {
select {
case <-c.closed:
case c.m1c <- match:
@ -442,10 +435,10 @@ func getTicketIds(tickets []*pb.Ticket) []string {
///////////////////////////////////////
// Calls statestore to add all of the tickets returned by the evaluator to the
// pendingRelease list. If it partially fails for whatever reason (not all tickets will
// necessarily be in the same call), only the matches which can be safely
// ignorelist. If it partially fails for whatever reason (not all tickets will
// nessisarily be in the same call), only the matches which can be safely
// returned to the Synchronize calls are.
func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
totalMatches := 0
successfulMatches := 0
var lastErr error
@ -460,7 +453,7 @@ func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m
}
}
err := s.store.AddTicketsToPendingRelease(ctx, ids)
err := s.store.AddTicketsToIgnoreList(ctx, ids)
totalMatches += len(mIDs)
if err == nil {
@ -479,10 +472,10 @@ func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m
"error": lastErr.Error(),
"totalMatches": totalMatches,
"successfulMatches": successfulMatches,
}).Error("some or all matches were not successfully added to the pending release, failed matches dropped")
}).Error("some or all matches were not successfully added to the ignore list, failed matches dropped")
if successfulMatches == 0 {
cancel(fmt.Errorf("no matches successfully added to the pending release. Last error: %w", lastErr))
cancel(fmt.Errorf("no matches successfully added to the ignore list. Last error: %w", lastErr))
}
}
close(m6c)
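
The fan-in/fan-out comment at the top of this file is easier to follow in isolation. Below is a minimal, self-contained sketch of the pattern it describes (an illustration only, with hypothetical names such as `matchAndResult`; it is not the synchronizer's actual code): each incoming match registers the channel its result should come back on, the match is forwarded for evaluation, and evaluated IDs are routed back through the map.

```go
// Sketch of the fan-in/fan-out routing described above. Names and types
// (match, matchAndResult) are assumptions for illustration, not Open Match
// internals.
package main

import "fmt"

type match struct{ ID string }

// matchAndResult pairs a match with the channel its evaluation result
// should be delivered on.
type matchAndResult struct {
	m      match
	result chan<- string
}

// fanInFanOut remembers each match's result channel in a map, forwards the
// match to the evaluator, and routes evaluated IDs back to the right caller.
func fanInFanOut(in <-chan matchAndResult, toEval chan<- match, evaluated <-chan string) {
	resultChans := make(map[string]chan<- string)
	for {
		select {
		case mr, ok := <-in:
			if !ok {
				close(toEval) // no more proposals; let the evaluator drain
				in = nil      // a nil channel never fires in select
				continue
			}
			resultChans[mr.m.ID] = mr.result
			toEval <- mr.m
		case id, ok := <-evaluated:
			if !ok {
				return
			}
			if c, found := resultChans[id]; found {
				c <- id
			}
		}
	}
}

func main() {
	in := make(chan matchAndResult)
	toEval := make(chan match)
	evaluated := make(chan string)
	done := make(chan string, 1)

	go fanInFanOut(in, toEval, evaluated)
	go func() { // stand-in evaluator: approve whatever it receives
		m := <-toEval
		evaluated <- m.ID
	}()

	in <- matchAndResult{m: match{ID: "match-1"}, result: done}
	fmt.Println("evaluated:", <-done)
}
```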

@ -1,4 +1,3 @@
//go:build !race
// +build !race
// Copyright 2019 Google LLC

@ -21,7 +21,6 @@ import (
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -73,23 +72,16 @@ func NewPoolFilter(pool *pb.Pool) (*PoolFilter, error) {
}, nil
}
type filteredEntity interface {
GetId() string
GetSearchFields() *pb.SearchFields
GetCreateTime() *timestamp.Timestamp
}
// In returns true if the Ticket meets all the criteria for this PoolFilter.
func (pf *PoolFilter) In(entity filteredEntity) bool {
s := entity.GetSearchFields()
func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
s := ticket.GetSearchFields()
if s == nil {
s = emptySearchFields
}
if !pf.CreatedAfter.IsZero() || !pf.CreatedBefore.IsZero() {
// CreateTime is only populated by Open Match and hence expected to be valid.
if ct, err := ptypes.Timestamp(entity.GetCreateTime()); err == nil {
if ct, err := ptypes.Timestamp(ticket.CreateTime); err == nil {
if !pf.CreatedAfter.IsZero() {
if !ct.After(pf.CreatedAfter) {
return false
@ -104,7 +96,7 @@ func (pf *PoolFilter) In(entity filteredEntity) bool {
} else {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": entity.GetId(),
"id": ticket.GetId(),
}).Error("failed to get time from Timestamp proto")
}
}
@ -114,27 +106,10 @@ func (pf *PoolFilter) In(entity filteredEntity) bool {
if !ok {
return false
}
switch f.Exclude {
case pb.DoubleRangeFilter_NONE:
// Not simplified so that NaN cases are handled correctly.
if !(v >= f.Min && v <= f.Max) {
return false
}
case pb.DoubleRangeFilter_MIN:
if !(v > f.Min && v <= f.Max) {
return false
}
case pb.DoubleRangeFilter_MAX:
if !(v >= f.Min && v < f.Max) {
return false
}
case pb.DoubleRangeFilter_BOTH:
if !(v > f.Min && v < f.Max) {
return false
}
// Not simplified so that NaN cases are handled correctly.
if !(v >= f.Min && v <= f.Max) {
return false
}
}
for _, f := range pf.StringEqualsFilters {
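
The "Not simplified so that NaN cases are handled correctly" comment above deserves a short aside: every IEEE-754 comparison involving NaN is false, so the filter keeps the check in the form `!(v >= Min && v <= Max)`, which rejects a NaN value or a NaN bound. The "simplified" form `v < Min || v > Max` would silently let NaN through. A tiny standalone sketch (not Open Match code) showing the difference:

```go
package main

import (
	"fmt"
	"math"
)

// inRange mirrors the non-simplified check used by the range filter: it only
// returns true when both comparisons succeed, so a NaN value or bound fails.
func inRange(v, min, max float64) bool {
	return v >= min && v <= max
}

// naiveInRange is the "simplified" form. Because comparisons with NaN are
// always false, it wrongly reports NaN values as being inside the range.
func naiveInRange(v, min, max float64) bool {
	return !(v < min || v > max)
}

func main() {
	nan := math.NaN()
	fmt.Println(inRange(nan, 0, 10))      // false: NaN value is filtered out
	fmt.Println(naiveInRange(nan, 0, 10)) // true:  NaN value slips through
	fmt.Println(inRange(5, nan, 10))      // false: NaN bound matches nothing
}
```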

@ -19,7 +19,7 @@ import (
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/filter/testcases"
@ -27,53 +27,31 @@ import (
)
func TestMeetsCriteria(t *testing.T) {
testInclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
pf, err := NewPoolFilter(pool)
require.NoError(t, err)
require.NotNil(t, pf)
if !pf.In(entity) {
t.Error("entity should be included in the pool")
}
}
for _, tc := range testcases.IncludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
testInclusion(t, tc.Pool, &pb.Ticket{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
testInclusion(t, tc.Pool, &pb.Backfill{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
pf, err := NewPoolFilter(tc.Pool)
if err != nil {
t.Error("pool should be valid")
}
tc.Ticket.CreateTime = ptypes.TimestampNow()
if !pf.In(tc.Ticket) {
t.Error("ticket should be included in the pool")
}
})
}
testExclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
pf, err := NewPoolFilter(pool)
require.NoError(t, err)
require.NotNil(t, pf)
if pf.In(entity) {
t.Error("ticket should be excluded from the pool")
}
}
for _, tc := range testcases.ExcludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
testExclusion(t, tc.Pool, &pb.Ticket{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
testExclusion(t, tc.Pool, &pb.Backfill{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
pf, err := NewPoolFilter(tc.Pool)
if err != nil {
t.Error("pool should be valid")
}
tc.Ticket.CreateTime = ptypes.TimestampNow()
if pf.In(tc.Ticket) {
t.Error("ticket should be excluded from the pool")
}
})
}
}
@ -105,13 +83,10 @@ func TestValidPoolFilter(t *testing.T) {
tc := tc
t.Run(tc.name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.pool)
require.Error(t, err)
require.Nil(t, pf)
assert.Nil(t, pf)
s := status.Convert(err)
require.Equal(t, tc.code, s.Code())
require.Equal(t, tc.msg, s.Message())
assert.Equal(t, tc.code, s.Code())
assert.Equal(t, tc.msg, s.Message())
})
}
}
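
The assertion changes in this file swap between testify's `require` and `assert` packages. The practical difference is that `assert` records a failure and lets the test keep running, while `require` records the failure and stops the test immediately, so later lines can safely rely on the asserted values. A small illustrative `_test.go` sketch (both tests deliberately fail, purely to show the two behaviors; the `newFilter` helper is hypothetical):

```go
package filter_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// newFilter is a hypothetical constructor that always fails, used only to
// demonstrate how the two assertion styles behave after a failure.
func newFilter() (*struct{}, error) { return nil, errors.New("invalid pool") }

func TestAssertContinues(t *testing.T) {
	f, err := newFilter()
	assert.NoError(t, err) // test is marked failed, but execution continues,
	if f != nil {          // so later code must still guard against nil.
		t.Log("got a filter")
	}
}

func TestRequireStops(t *testing.T) {
	f, err := newFilter()
	require.NoError(t, err) // test is marked failed and stops right here,
	require.NotNil(t, f)    // so anything below can safely dereference f.
	t.Log("got a filter:", *f)
}
```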

@ -27,9 +27,9 @@ import (
// TestCase defines a single filtering test case to run.
type TestCase struct {
Name string
SearchFields *pb.SearchFields
Pool *pb.Pool
Name string
Ticket *pb.Ticket
Pool *pb.Pool
}
// IncludedTestCases returns a list of test cases where using the given filter,
@ -39,38 +39,22 @@ func IncludedTestCases() []TestCase {
return []TestCase{
{
"no filters or fields",
nil,
&pb.Ticket{},
&pb.Pool{},
},
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("excludeNone", 0, 0, 1, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("excludeNone", 1, 0, 1, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("excludeMin", 1, 0, 1, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("excludeMax", 0, 0, 1, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("excludeBoth", 2, 0, 3, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("excludeBoth", 1, 0, 3, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("simpleInRange", 5, 0, 10),
simpleDoubleRange("exactMatch", 5, 5, 5),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1)),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0),
{
"String equals simple positive",
&pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
},
&pb.Pool{
@ -85,9 +69,11 @@ func IncludedTestCases() []TestCase {
{
"TagPresent simple positive",
&pb.SearchFields{
Tags: []string{
"mytag",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"mytag",
},
},
},
&pb.Pool{
@ -101,9 +87,11 @@ func IncludedTestCases() []TestCase {
{
"TagPresent multiple all present",
&pb.SearchFields{
Tags: []string{
"A", "B", "C",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
},
&pb.Pool{
@ -125,21 +113,21 @@ func IncludedTestCases() []TestCase {
{
"CreatedBefore simple positive",
nil,
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
},
},
{
"CreatedAfter simple positive",
nil,
&pb.Ticket{},
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
},
},
{
"Between CreatedBefore and CreatedAfter positive",
nil,
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
@ -147,7 +135,7 @@ func IncludedTestCases() []TestCase {
},
{
"No time search criteria positive",
nil,
&pb.Ticket{},
&pb.Pool{},
},
}
@ -160,7 +148,7 @@ func ExcludedTestCases() []TestCase {
return []TestCase{
{
"DoubleRange no SearchFields",
nil,
&pb.Ticket{},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
@ -173,7 +161,7 @@ func ExcludedTestCases() []TestCase {
},
{
"StringEquals no SearchFields",
nil,
&pb.Ticket{},
&pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
@ -185,7 +173,7 @@ func ExcludedTestCases() []TestCase {
},
{
"TagPresent no SearchFields",
nil,
&pb.Ticket{},
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
@ -194,11 +182,14 @@ func ExcludedTestCases() []TestCase {
},
},
},
{
"double range missing field",
&pb.SearchFields{
DoubleArgs: map[string]float64{
"otherfield": 0,
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"otherfield": 0,
},
},
},
&pb.Pool{
@ -212,66 +203,22 @@ func ExcludedTestCases() []TestCase {
},
},
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsMax", 1, 0, 1, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsMin", 0, 0, 1, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("excludeBoth", 0, 0, 1, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("excludeBoth", 1, 0, 1, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueTooLow", -1, 0, 10),
simpleDoubleRange("valueTooHigh", 11, 0, 10),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN()),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN()),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1)),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN()),
{
"String equals simple negative", // and case sensitivity
&pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
},
&pb.Pool{
@ -286,9 +233,11 @@ func ExcludedTestCases() []TestCase {
{
"String equals missing field",
&pb.SearchFields{
StringArgs: map[string]string{
"otherfield": "othervalue",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"otherfield": "othervalue",
},
},
},
&pb.Pool{
@ -303,9 +252,11 @@ func ExcludedTestCases() []TestCase {
{
"TagPresent simple negative", // and case sensitivity
&pb.SearchFields{
Tags: []string{
"MYTAG",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"MYTAG",
},
},
},
&pb.Pool{
@ -319,9 +270,11 @@ func ExcludedTestCases() []TestCase {
{
"TagPresent multiple with one missing",
&pb.SearchFields{
Tags: []string{
"A", "B", "C",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
},
&pb.Pool{
@ -341,21 +294,21 @@ func ExcludedTestCases() []TestCase {
{
"CreatedBefore simple negative",
nil,
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
},
},
{
"CreatedAfter simple negative",
nil,
&pb.Ticket{},
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
},
},
{
"Created before time range negative",
nil,
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 2)),
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
@ -363,7 +316,7 @@ func ExcludedTestCases() []TestCase {
},
{
"Created after time range negative",
nil,
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
CreatedAfter: timestamp(now.Add(time.Hour * -2)),
@ -376,12 +329,14 @@ func ExcludedTestCases() []TestCase {
}
}
func simpleDoubleRange(name string, value, min, max float64, exclude pb.DoubleRangeFilter_Exclude) TestCase {
func simpleDoubleRange(name string, value, min, max float64) TestCase {
return TestCase{
"double range " + name,
&pb.SearchFields{
DoubleArgs: map[string]float64{
"field": value,
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"field": value,
},
},
},
&pb.Pool{
@ -390,7 +345,6 @@ func simpleDoubleRange(name string, value, min, max float64, exclude pb.DoubleRa
DoubleArg: "field",
Min: min,
Max: max,
Exclude: exclude,
},
},
},
@ -415,14 +369,16 @@ func multipleFilters(doubleRange, stringEquals, tagPresent bool) TestCase {
return TestCase{
fmt.Sprintf("multiplefilters: %v, %v, %v", doubleRange, stringEquals, tagPresent),
&pb.SearchFields{
DoubleArgs: map[string]float64{
"a": a,
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"a": a,
},
StringArgs: map[string]string{
"b": b,
},
Tags: []string{c},
},
StringArgs: map[string]string{
"b": b,
},
Tags: []string{c},
},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{

@ -1,177 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0-devel
// protoc v3.10.1
// source: internal/api/messages.proto
package ipb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
pb "open-match.dev/open-match/pkg/pb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type BackfillInternal struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Represents a backfill entity which is used to fill partially full matches
Backfill *pb.Backfill `protobuf:"bytes,1,opt,name=backfill,proto3" json:"backfill,omitempty"`
// List of ticket IDs associated with a current backfill
TicketIds []string `protobuf:"bytes,2,rep,name=ticket_ids,json=ticketIds,proto3" json:"ticket_ids,omitempty"`
}
func (x *BackfillInternal) Reset() {
*x = BackfillInternal{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_messages_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *BackfillInternal) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BackfillInternal) ProtoMessage() {}
func (x *BackfillInternal) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_messages_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BackfillInternal.ProtoReflect.Descriptor instead.
func (*BackfillInternal) Descriptor() ([]byte, []int) {
return file_internal_api_messages_proto_rawDescGZIP(), []int{0}
}
func (x *BackfillInternal) GetBackfill() *pb.Backfill {
if x != nil {
return x.Backfill
}
return nil
}
func (x *BackfillInternal) GetTicketIds() []string {
if x != nil {
return x.TicketIds
}
return nil
}
var File_internal_api_messages_proto protoreflect.FileDescriptor
var file_internal_api_messages_proto_rawDesc = []byte{
0x0a, 0x1b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x6f,
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x62, 0x0a, 0x10, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c,
0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x2f, 0x0a, 0x08, 0x62, 0x61, 0x63,
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70,
0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c,
0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69,
0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x73, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_internal_api_messages_proto_rawDescOnce sync.Once
file_internal_api_messages_proto_rawDescData = file_internal_api_messages_proto_rawDesc
)
func file_internal_api_messages_proto_rawDescGZIP() []byte {
file_internal_api_messages_proto_rawDescOnce.Do(func() {
file_internal_api_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_messages_proto_rawDescData)
})
return file_internal_api_messages_proto_rawDescData
}
var file_internal_api_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_internal_api_messages_proto_goTypes = []interface{}{
(*BackfillInternal)(nil), // 0: openmatch.internal.BackfillInternal
(*pb.Backfill)(nil), // 1: openmatch.Backfill
}
var file_internal_api_messages_proto_depIdxs = []int32{
1, // 0: openmatch.internal.BackfillInternal.backfill:type_name -> openmatch.Backfill
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_internal_api_messages_proto_init() }
func file_internal_api_messages_proto_init() {
if File_internal_api_messages_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_internal_api_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BackfillInternal); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_api_messages_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_internal_api_messages_proto_goTypes,
DependencyIndexes: file_internal_api_messages_proto_depIdxs,
MessageInfos: file_internal_api_messages_proto_msgTypes,
}.Build()
File_internal_api_messages_proto = out.File
file_internal_api_messages_proto_rawDesc = nil
file_internal_api_messages_proto_goTypes = nil
file_internal_api_messages_proto_depIdxs = nil
}

@ -1,97 +1,71 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0-devel
// protoc v3.10.1
// source: internal/api/synchronizer.proto
package ipb
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
math "math"
pb "open-match.dev/open-match/pkg/pb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type SynchronizeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// A match returned by an mmf.
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (x *SynchronizeRequest) Reset() {
*x = SynchronizeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SynchronizeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SynchronizeRequest) ProtoMessage() {}
func (x *SynchronizeRequest) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SynchronizeRequest.ProtoReflect.Descriptor instead.
func (m *SynchronizeRequest) Reset() { *m = SynchronizeRequest{} }
func (m *SynchronizeRequest) String() string { return proto.CompactTextString(m) }
func (*SynchronizeRequest) ProtoMessage() {}
func (*SynchronizeRequest) Descriptor() ([]byte, []int) {
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{0}
return fileDescriptor_35ff6b85fea1c4b7, []int{0}
}
func (x *SynchronizeRequest) GetProposal() *pb.Match {
if x != nil {
return x.Proposal
func (m *SynchronizeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SynchronizeRequest.Unmarshal(m, b)
}
func (m *SynchronizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SynchronizeRequest.Marshal(b, m, deterministic)
}
func (m *SynchronizeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SynchronizeRequest.Merge(m, src)
}
func (m *SynchronizeRequest) XXX_Size() int {
return xxx_messageInfo_SynchronizeRequest.Size(m)
}
func (m *SynchronizeRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SynchronizeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SynchronizeRequest proto.InternalMessageInfo
func (m *SynchronizeRequest) GetProposal() *pb.Match {
if m != nil {
return m.Proposal
}
return nil
}
type SynchronizeResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Instructs the backend call that it can start running the mmfs.
StartMmfs bool `protobuf:"varint,1,opt,name=start_mmfs,json=startMmfs,proto3" json:"start_mmfs,omitempty"`
// Instructs the backend call that it should cancel any RPC calls to the mmfs,
@ -99,181 +73,93 @@ type SynchronizeResponse struct {
CancelMmfs bool `protobuf:"varint,2,opt,name=cancel_mmfs,json=cancelMmfs,proto3" json:"cancel_mmfs,omitempty"`
// A match ID returned by the evaluator, which should be returned to the FetchMatches
// caller.
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (x *SynchronizeResponse) Reset() {
*x = SynchronizeResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SynchronizeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SynchronizeResponse) ProtoMessage() {}
func (x *SynchronizeResponse) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SynchronizeResponse.ProtoReflect.Descriptor instead.
func (m *SynchronizeResponse) Reset() { *m = SynchronizeResponse{} }
func (m *SynchronizeResponse) String() string { return proto.CompactTextString(m) }
func (*SynchronizeResponse) ProtoMessage() {}
func (*SynchronizeResponse) Descriptor() ([]byte, []int) {
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{1}
return fileDescriptor_35ff6b85fea1c4b7, []int{1}
}
func (x *SynchronizeResponse) GetStartMmfs() bool {
if x != nil {
return x.StartMmfs
func (m *SynchronizeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SynchronizeResponse.Unmarshal(m, b)
}
func (m *SynchronizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SynchronizeResponse.Marshal(b, m, deterministic)
}
func (m *SynchronizeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SynchronizeResponse.Merge(m, src)
}
func (m *SynchronizeResponse) XXX_Size() int {
return xxx_messageInfo_SynchronizeResponse.Size(m)
}
func (m *SynchronizeResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SynchronizeResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SynchronizeResponse proto.InternalMessageInfo
func (m *SynchronizeResponse) GetStartMmfs() bool {
if m != nil {
return m.StartMmfs
}
return false
}
func (x *SynchronizeResponse) GetCancelMmfs() bool {
if x != nil {
return x.CancelMmfs
func (m *SynchronizeResponse) GetCancelMmfs() bool {
if m != nil {
return m.CancelMmfs
}
return false
}
func (x *SynchronizeResponse) GetMatchId() string {
if x != nil {
return x.MatchId
func (m *SynchronizeResponse) GetMatchId() string {
if m != nil {
return m.MatchId
}
return ""
}
var File_internal_api_synchronizer_proto protoreflect.FileDescriptor
var file_internal_api_synchronizer_proto_rawDesc = []byte{
0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73,
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x12, 0x53, 0x79, 0x6e,
0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61,
0x74, 0x63, 0x68, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x22, 0x76, 0x0a,
0x13, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6d, 0x6d,
0x66, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4d,
0x6d, 0x66, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x6d, 0x6d,
0x66, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c,
0x4d, 0x6d, 0x66, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x4a,
0x04, 0x08, 0x03, 0x10, 0x04, 0x32, 0x72, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
0x6e, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
0x6e, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6f,
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
func init() {
proto.RegisterType((*SynchronizeRequest)(nil), "openmatch.internal.SynchronizeRequest")
proto.RegisterType((*SynchronizeResponse)(nil), "openmatch.internal.SynchronizeResponse")
}
var (
file_internal_api_synchronizer_proto_rawDescOnce sync.Once
file_internal_api_synchronizer_proto_rawDescData = file_internal_api_synchronizer_proto_rawDesc
)
func init() { proto.RegisterFile("internal/api/synchronizer.proto", fileDescriptor_35ff6b85fea1c4b7) }
func file_internal_api_synchronizer_proto_rawDescGZIP() []byte {
file_internal_api_synchronizer_proto_rawDescOnce.Do(func() {
file_internal_api_synchronizer_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_synchronizer_proto_rawDescData)
})
return file_internal_api_synchronizer_proto_rawDescData
}
var file_internal_api_synchronizer_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_internal_api_synchronizer_proto_goTypes = []interface{}{
(*SynchronizeRequest)(nil), // 0: openmatch.internal.SynchronizeRequest
(*SynchronizeResponse)(nil), // 1: openmatch.internal.SynchronizeResponse
(*pb.Match)(nil), // 2: openmatch.Match
}
var file_internal_api_synchronizer_proto_depIdxs = []int32{
2, // 0: openmatch.internal.SynchronizeRequest.proposal:type_name -> openmatch.Match
0, // 1: openmatch.internal.Synchronizer.Synchronize:input_type -> openmatch.internal.SynchronizeRequest
1, // 2: openmatch.internal.Synchronizer.Synchronize:output_type -> openmatch.internal.SynchronizeResponse
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_internal_api_synchronizer_proto_init() }
func file_internal_api_synchronizer_proto_init() {
if File_internal_api_synchronizer_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_internal_api_synchronizer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_api_synchronizer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_api_synchronizer_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_internal_api_synchronizer_proto_goTypes,
DependencyIndexes: file_internal_api_synchronizer_proto_depIdxs,
MessageInfos: file_internal_api_synchronizer_proto_msgTypes,
}.Build()
File_internal_api_synchronizer_proto = out.File
file_internal_api_synchronizer_proto_rawDesc = nil
file_internal_api_synchronizer_proto_goTypes = nil
file_internal_api_synchronizer_proto_depIdxs = nil
var fileDescriptor_35ff6b85fea1c4b7 = []byte{
// 263 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
0x10, 0xc5, 0x89, 0x16, 0x4d, 0x27, 0x1e, 0xca, 0x7a, 0xa9, 0x05, 0x69, 0xe9, 0xa1, 0xe6, 0xa0,
0x1b, 0xa9, 0xdf, 0xa0, 0x37, 0x85, 0x5e, 0xe2, 0xcd, 0x4b, 0xd9, 0x24, 0x53, 0xbb, 0x90, 0xfd,
0xe3, 0xce, 0x5a, 0xd0, 0x4f, 0x2f, 0xd9, 0xc5, 0xa6, 0xd2, 0x83, 0x97, 0x85, 0x37, 0xf3, 0xdb,
0x37, 0xcc, 0x1b, 0x98, 0x4a, 0xed, 0xd1, 0x69, 0xd1, 0x16, 0xc2, 0xca, 0x82, 0xbe, 0x74, 0xbd,
0x73, 0x46, 0xcb, 0x6f, 0x74, 0xdc, 0x3a, 0xe3, 0x0d, 0x63, 0xc6, 0xa2, 0x56, 0xc2, 0xd7, 0x3b,
0xfe, 0x8b, 0x4e, 0x58, 0xc7, 0x2a, 0x24, 0x12, 0xef, 0x48, 0x91, 0x9b, 0xaf, 0x80, 0xbd, 0xf6,
0xbf, 0x4b, 0xfc, 0xf8, 0x44, 0xf2, 0xec, 0x1e, 0x52, 0xeb, 0x8c, 0x35, 0x24, 0xda, 0x71, 0x32,
0x4b, 0xf2, 0x6c, 0x39, 0xe2, 0xbd, 0xe1, 0xba, 0x7b, 0xcb, 0x03, 0x31, 0xdf, 0xc3, 0xf5, 0x1f,
0x0f, 0xb2, 0x46, 0x13, 0xb2, 0x5b, 0x00, 0xf2, 0xc2, 0xf9, 0x8d, 0x52, 0x5b, 0x0a, 0x36, 0x69,
0x39, 0x0c, 0x95, 0xb5, 0xda, 0x12, 0x9b, 0x42, 0x56, 0x0b, 0x5d, 0x63, 0x1b, 0xfb, 0x67, 0xa1,
0x0f, 0xb1, 0x14, 0x80, 0x1b, 0x48, 0xc3, 0xbc, 0x8d, 0x6c, 0xc6, 0x83, 0x59, 0x92, 0x0f, 0xcb,
0xcb, 0xa0, 0x9f, 0x9b, 0x97, 0x41, 0x7a, 0x3e, 0x1a, 0x2c, 0x1d, 0x5c, 0x1d, 0xcd, 0x75, 0xac,
0x82, 0xec, 0x48, 0xb3, 0x05, 0x3f, 0xcd, 0x80, 0x9f, 0x2e, 0x3b, 0xb9, 0xfb, 0x97, 0x8b, 0x0b,
0xe5, 0xc9, 0x63, 0xb2, 0xca, 0xdf, 0x16, 0x1d, 0xfd, 0x10, 0xf1, 0x06, 0xf7, 0x45, 0x2f, 0x8b,
0xc3, 0x51, 0xa4, 0xad, 0xaa, 0x8b, 0x10, 0xf0, 0xd3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84,
0xba, 0xf6, 0x41, 0xab, 0x01, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
const _ = grpc.SupportPackageIsVersion4
// SynchronizerClient is the client API for Synchronizer service.
//
@ -285,10 +171,10 @@ type SynchronizerClient interface {
}
type synchronizerClient struct {
cc grpc.ClientConnInterface
cc *grpc.ClientConn
}
func NewSynchronizerClient(cc grpc.ClientConnInterface) SynchronizerClient {
func NewSynchronizerClient(cc *grpc.ClientConn) SynchronizerClient {
return &synchronizerClient{cc}
}
@ -334,7 +220,7 @@ type SynchronizerServer interface {
type UnimplementedSynchronizerServer struct {
}
func (*UnimplementedSynchronizerServer) Synchronize(Synchronizer_SynchronizeServer) error {
func (*UnimplementedSynchronizerServer) Synchronize(srv Synchronizer_SynchronizeServer) error {
return status.Errorf(codes.Unimplemented, "method Synchronize not implemented")
}
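
The field comments on SynchronizeRequest and SynchronizeResponse describe the internal synchronizer protocol: the backend streams match proposals up, and the synchronizer streams back signals to start or cancel mmf calls plus the IDs of matches that survived evaluation. A rough sketch of a client loop over this bidirectional stream (an assumption-laden illustration, not the actual backend implementation; error handling and channel plumbing are simplified):

```go
package backend

import (
	"context"

	"open-match.dev/open-match/internal/ipb"
	"open-match.dev/open-match/pkg/pb"
)

// synchronizeProposals streams proposals to the synchronizer and reacts to
// its responses. Illustrative only; the real backend does more bookkeeping.
func synchronizeProposals(ctx context.Context, client ipb.SynchronizerClient, proposals <-chan *pb.Match) error {
	stream, err := client.Synchronize(ctx)
	if err != nil {
		return err
	}

	// Send every proposal produced by the match functions.
	go func() {
		for m := range proposals {
			if err := stream.Send(&ipb.SynchronizeRequest{Proposal: m}); err != nil {
				return
			}
		}
		_ = stream.CloseSend()
	}()

	// React to the synchronizer's signals.
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // io.EOF signals a normal end of the window
		}
		switch {
		case resp.GetStartMmfs():
			// Start calling the configured match functions.
		case resp.GetCancelMmfs():
			// Cancel any in-flight match function calls.
		case resp.GetMatchId() != "":
			// This proposal survived evaluation; hand it back to the caller.
		}
	}
}
```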

@ -21,7 +21,7 @@ import (
stackdriver "github.com/TV4/logrus-stackdriver-formatter"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
)
func TestNewFormatter(t *testing.T) {
@ -37,9 +37,9 @@ func TestNewFormatter(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("newFormatter(%s) => %s", tc.in, tc.expected), func(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
actual := newFormatter(tc.in)
require.Equal(reflect.TypeOf(tc.expected), reflect.TypeOf(actual))
assert.Equal(reflect.TypeOf(tc.expected), reflect.TypeOf(actual))
})
}
}
@ -60,9 +60,9 @@ func TestIsDebugLevel(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("isDebugLevel(%s) => %t", tc.in, tc.expected), func(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
actual := isDebugLevel(tc.in)
require.Equal(tc.expected, actual)
assert.Equal(tc.expected, actual)
})
}
}
@ -87,9 +87,9 @@ func TestToLevel(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("toLevel(%s) => %s", tc.in, tc.expected), func(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
actual := toLevel(tc.in)
require.Equal(tc.expected, actual)
assert.Equal(tc.expected, actual)
})
}
}

@ -18,7 +18,7 @@ import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
)
const (
@ -27,31 +27,31 @@ const (
)
func TestGetGRPC(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cc := NewClientCache(viper.New())
client, err := cc.GetGRPC(fakeGRPCAddress)
require.Nil(err)
assert.Nil(err)
cachedClient, err := cc.GetGRPC(fakeGRPCAddress)
require.Nil(err)
assert.Nil(err)
// Test caching by comparing pointer value
require.EqualValues(client, cachedClient)
assert.EqualValues(client, cachedClient)
}
func TestGetHTTP(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cc := NewClientCache(viper.New())
client, address, err := cc.GetHTTP(fakeHTTPAddress)
require.Nil(err)
require.Equal(fakeHTTPAddress, address)
assert.Nil(err)
assert.Equal(fakeHTTPAddress, address)
cachedClient, address, err := cc.GetHTTP(fakeHTTPAddress)
require.Nil(err)
require.Equal(fakeHTTPAddress, address)
assert.Nil(err)
assert.Equal(fakeHTTPAddress, address)
// Test caching by comparing pointer value
require.EqualValues(client, cachedClient)
assert.EqualValues(client, cachedClient)
}

@ -22,11 +22,8 @@ import (
"os"
"testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/telemetry"
@ -37,48 +34,39 @@ import (
)
func TestSecureGRPCFromConfig(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, true)
defer closer()
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
runGrpcClientTests(t, assert, cfg, rpcParams)
}
func TestInsecureGRPCFromConfig(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, false)
defer closer()
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
}
func TestUnavailableGRPCFromConfig(t *testing.T) {
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "badhost")
defer closer()
runFailureGrpcClientTests(t, require, cfg, rpcParams, codes.Unavailable)
runGrpcClientTests(t, assert, cfg, rpcParams)
}
func TestHTTPSFromConfig(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, true)
defer closer()
runHTTPClientTests(require, cfg, rpcParams)
runHTTPClientTests(assert, cfg, rpcParams)
}
func TestInsecureHTTPFromConfig(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, false)
defer closer()
runHTTPClientTests(require, cfg, rpcParams)
runHTTPClientTests(assert, cfg, rpcParams)
}
func TestSanitizeHTTPAddress(t *testing.T) {
@ -100,15 +88,15 @@ func TestSanitizeHTTPAddress(t *testing.T) {
tc := testCase
description := fmt.Sprintf("sanitizeHTTPAddress(%s, %t) => (%s, %v)", tc.address, tc.preferHTTPS, tc.expected, tc.err)
t.Run(description, func(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
actual, err := sanitizeHTTPAddress(tc.address, tc.preferHTTPS)
require.Equal(tc.expected, actual)
require.Equal(tc.err, err)
assert.Equal(tc.expected, actual)
assert.Equal(tc.err, err)
})
}
}
func setupClientConnection(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) *grpc.ClientConn {
func runGrpcClientTests(t *testing.T, assert *assert.Assertions, cfg config.View, rpcParams *ServerParams) {
// Serve a fake frontend server and wait for its full start up
ff := &shellTesting.FakeFrontend{}
rpcParams.AddHandleFunc(func(s *grpc.Server) {
@ -116,45 +104,24 @@ func setupClientConnection(t *testing.T, require *require.Assertions, cfg config
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
s := &Server{}
t.Cleanup(func() {
defer s.Stop()
})
defer s.Stop()
err := s.Start(rpcParams)
require.Nil(err)
assert.Nil(err)
// Acquire grpc client
grpcConn, err := GRPCClientFromConfig(cfg, "test")
require.Nil(err)
require.NotNil(grpcConn)
return grpcConn
}
func runSuccessGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
assert.Nil(err)
assert.NotNil(grpcConn)
// Confirm the client works as expected
ctx := utilTesting.NewContext(t)
feClient := pb.NewFrontendServiceClient(grpcConn)
grpcResp, err := feClient.CreateTicket(ctx, &pb.CreateTicketRequest{})
require.Nil(err)
require.NotNil(grpcResp)
assert.Nil(err)
assert.NotNil(grpcResp)
}
func runFailureGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams, expectedCode codes.Code) {
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
// Confirm the client works as expected
ctx := utilTesting.NewContext(t)
feClient := pb.NewFrontendServiceClient(grpcConn)
grpcResp, err := feClient.CreateTicket(ctx, &pb.CreateTicketRequest{})
require.Error(err)
require.Nil(grpcResp)
code := status.Code(err)
require.Equal(expectedCode, code)
}
func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *ServerParams) {
// Serve a fake frontend server and wait for its full start up
ff := &shellTesting.FakeFrontend{}
rpcParams.AddHandleFunc(func(s *grpc.Server) {
@ -163,20 +130,20 @@ func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams
s := &Server{}
defer s.Stop()
err := s.Start(rpcParams)
require.Nil(err)
assert.Nil(err)
// Acquire http client
httpClient, baseURL, err := HTTPClientFromConfig(cfg, "test")
require.Nil(err)
assert.Nil(err)
// Confirm the client works as expected
httpReq, err := http.NewRequest(http.MethodGet, baseURL+telemetry.HealthCheckEndpoint, nil)
require.Nil(err)
require.NotNil(httpReq)
assert.Nil(err)
assert.NotNil(httpReq)
httpResp, err := httpClient.Do(httpReq)
require.Nil(err)
require.NotNil(httpResp)
assert.Nil(err)
assert.NotNil(httpResp)
defer func() {
if httpResp != nil {
httpResp.Body.Close()
@ -184,13 +151,13 @@ func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams
}()
body, err := ioutil.ReadAll(httpResp.Body)
require.Nil(err)
require.Equal(200, httpResp.StatusCode)
require.Equal("ok", string(body))
assert.Nil(err)
assert.Equal(200, httpResp.StatusCode)
assert.Equal("ok", string(body))
}
// Generate a config view and optional TLS key manifests (optional) for testing
func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions, tlsEnabled bool, host string) (config.View, *ServerParams, func()) {
func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool) (config.View, *ServerParams, func()) {
// Create netlisteners on random ports used for rpc serving
grpcL := MustListen()
httpL := MustListen()
@ -198,13 +165,13 @@ func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions,
// Generate a config view with paths to the manifests
cfg := viper.New()
cfg.Set("test.hostname", host)
cfg.Set("test.hostname", "localhost")
cfg.Set("test.grpcport", MustGetPortNumber(grpcL))
cfg.Set("test.httpport", MustGetPortNumber(httpL))
// Create temporary TLS key files for testing
pubFile, err := ioutil.TempFile("", "pub*")
require.Nil(err)
assert.Nil(err)
if tlsEnabled {
// Generate public and private key bytes
@ -212,11 +179,11 @@ func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions,
fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL)),
fmt.Sprintf("localhost:%s", MustGetPortNumber(httpL)),
})
require.Nil(err)
assert.Nil(err)
// Write certgen key bytes to the temp files
err = ioutil.WriteFile(pubFile.Name(), pubBytes, 0400)
require.Nil(err)
assert.Nil(err)
// Generate a config view with paths to the manifests
cfg.Set(configNameClientTrustedCertificatePath, pubFile.Name())
@ -224,7 +191,7 @@ func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions,
rpcParams.SetTLSConfiguration(pubBytes, pubBytes, priBytes)
}
return cfg, rpcParams, func() { removeTempFile(t, pubFile.Name()) }
return cfg, rpcParams, func() { removeTempFile(assert, pubFile.Name()) }
}
func MustListen() net.Listener {
@ -243,11 +210,9 @@ func MustGetPortNumber(l net.Listener) string {
return port
}
func removeTempFile(t *testing.T, paths ...string) {
func removeTempFile(assert *assert.Assertions, paths ...string) {
for _, path := range paths {
err := os.Remove(path)
if err != nil {
t.Errorf("Can not remove the temporary file: %s, err: %s", path, err.Error())
}
assert.Nil(err)
}
}

@ -19,10 +19,9 @@ import (
"net"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/protobuf/encoding/protojson"
"open-match.dev/open-match/internal/telemetry"
)
@ -38,19 +37,7 @@ type insecureServer struct {
func (s *insecureServer) start(params *ServerParams) error {
s.httpMux = params.ServeMux
s.proxyMux = runtime.NewServeMux(
runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
Marshaler: &runtime.JSONPb{
MarshalOptions: protojson.MarshalOptions{
UseProtoNames: true,
EmitUnpopulated: false,
},
UnmarshalOptions: protojson.UnmarshalOptions{
DiscardUnknown: true,
},
},
}),
)
s.proxyMux = runtime.NewServeMux()
// Configure the gRPC server.
s.grpcServer = grpc.NewServer(newGRPCServerOptions(params)...)

Some files were not shown because too many files have changed in this diff.