mirror of
https://github.com/googleforgames/open-match.git
synced 2025-03-22 19:08:31 +00:00
Compare commits
74 Commits
release-1.
...
v1.4.0
Author | SHA1 | Date | |
---|---|---|---|
2746222e87 | |||
7730c128bf | |||
ec8f757afe | |||
b6e5114715 | |||
23d2fd5042 | |||
2b73d52e0c | |||
47c34587dc | |||
76937b6350 | |||
2e03c1a197 | |||
eca40e3298 | |||
902c9d69b4 | |||
67767cf1cd | |||
6f46731b15 | |||
0d1a77c5de | |||
f2a23f5ba1 | |||
3fa588c1f8 | |||
cc08f39205 | |||
ec9cf00bcf | |||
8b8617f68d | |||
ce9b989e58 | |||
5c00395c78 | |||
faf3eded1f | |||
250d44aefd | |||
13fdf5960f | |||
aa5a1f9da1 | |||
ad1ca16218 | |||
7d849f3f04 | |||
05c8c8aa76 | |||
f50c9eec80 | |||
c6f23f01ca | |||
21efdb6691 | |||
81a1dc38b6 | |||
d0ddf22658 | |||
ee247c6c1a | |||
a17eb3bc72 | |||
3d194f541e | |||
3a0cd7611b | |||
c13b461795 | |||
b9e55fc727 | |||
dd1386a55b | |||
defac9065b | |||
f203384fbf | |||
7ef9c052bd | |||
ea744b8b51 | |||
1a8fc62833 | |||
1d5574b8a3 | |||
75a3d43477 | |||
252fc8090d | |||
2c617f2cb6 | |||
fcd590eca6 | |||
4b3147511b | |||
c85af44567 | |||
688262111d | |||
26d1aa236a | |||
fff37cd82c | |||
98a227b515 | |||
88cd95fe57 | |||
248494c04c | |||
aa4398e786 | |||
fc5c3629e8 | |||
8d86709632 | |||
0a273674b9 | |||
e2247a7f53 | |||
b269896c23 | |||
a210185098 | |||
4df95deb54 | |||
a9b8eec9e0 | |||
afa59327a4 | |||
d86b6c5121 | |||
2eb2921914 | |||
80d882b7c7 | |||
0f34e31778 | |||
d45eb74510 | |||
1765ab7b7e |
1
.github/CODEOWNERS
vendored
Normal file
1
.github/CODEOWNERS
vendored
Normal file
@ -0,0 +1 @@
|
||||
* @laremere @aLekSer @HazWard @calebatwd @syntxerror @sawagh @andrewgrundy @scosgrave
|
3
.github/ISSUE_TEMPLATE/release.md
vendored
3
.github/ISSUE_TEMPLATE/release.md
vendored
@ -114,7 +114,6 @@ git push origin release-0.5
|
||||
- [ ] There might be additional references to the old version but be careful not to change it for places that have it for historical purposes.
|
||||
- [ ] Run `make release`
|
||||
- [ ] Run `make api/api.md` in open-match repo to update the auto-generated API references in open-match-docs repo.
|
||||
- [ ] Use the files under the `build/release/` directory for the Open Match installation guide. Make sure the artifacts work as expected - these are the artifacts that will be published to the GCS bucket and used in our release assets.
|
||||
- [ ] Create a PR with the changes, include the release candidate name, and point it to the release branch.
|
||||
- [ ] Go to [open-match-build](https://pantheon.corp.google.com/cloud-build/triggers?project=open-match-build) and update all *post submit* triggers' `_GCB_LATEST_VERSION` value to the `X.Y` of the release. This value should only increase as it's used to determine the latest stable version.
|
||||
- [ ] Merge your changes once the PR is approved.
|
||||
@ -152,6 +151,7 @@ only required once.**
|
||||
- [ ] Go to the History section and find the "Post Submit" build of the merged commit that's running. Wait for it to go Green. If it's red, fix error repeat this section. Take note of the docker image version tag for next step. Example: 0.5.0-a4706cb.
|
||||
- [ ] Run `./docs/governance/templates/release.sh {source version tag} {version}` to copy the images to open-match-public-images.
|
||||
- [ ] If this is a new minor version in the newest major version then run `./docs/governance/templates/release.sh {source version tag} latest`.
|
||||
- [ ] Use the files under the `build/release/` directory for the Open Match installation guide. Make sure the artifacts work as expected - these are the artifacts that will be published to the GCS bucket and used in our release assets.
|
||||
- [ ] Copy the files from `build/release/` generated from `make release` to the release draft you created. You can drag and drop the files using the Github UI.
|
||||
- [ ] Update [Slack invitation link](https://slack.com/help/articles/201330256-invite-new-members-to-your-workspace#share-an-invite-link) in [open-match.dev](https://open-match.dev/site/docs/contribute/#get-involved).
|
||||
- [ ] Test Open Match installation under GKE and Minikube enviroment using YAML files and Helm. Follow the [First Match](https://development.open-match.dev/site/docs/getting-started/first_match/) guide, run `make proxy-demo`, and open `localhost:51507` to make sure everything works.
|
||||
@ -165,6 +165,7 @@ only required once.**
|
||||
- [ ] Save the release as a draft.
|
||||
- [ ] Circulate the draft release to active contributors. Where reasonable, get everyone's ok on the release notes before continuing.
|
||||
- [ ] Publish the [Release](om-release) in Github. This will notify repository watchers.
|
||||
- [ ] Publish the [Release](om-release) on Open Match [Blog](https://open-match.dev/site/blog/).
|
||||
|
||||
## Announce
|
||||
|
||||
|
@ -18,7 +18,9 @@ WORKDIR /go/src/open-match.dev/open-match
|
||||
|
||||
ARG IMAGE_TITLE
|
||||
|
||||
RUN make "build/cmd/${IMAGE_TITLE}"
|
||||
RUN --mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
make "build/cmd/${IMAGE_TITLE}"
|
||||
|
||||
FROM gcr.io/distroless/static:nonroot
|
||||
ARG IMAGE_TITLE
|
||||
|
163
Makefile
163
Makefile
@ -15,44 +15,45 @@
|
||||
## Open Match Make Help
|
||||
## ====================
|
||||
##
|
||||
## Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
|
||||
## # Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
|
||||
## make activate-gcp-apis
|
||||
## make create-gke-cluster push-helm
|
||||
##
|
||||
## Create a Minikube Cluster (requires VirtualBox)
|
||||
## # Create a Minikube Cluster (requires VirtualBox)
|
||||
## make create-mini-cluster push-helm
|
||||
##
|
||||
## Create a KinD Cluster (Follow instructions to run command before pushing helm.)
|
||||
## # Create a KinD Cluster (Follow instructions to run command before pushing helm.)
|
||||
## make create-kind-cluster get-kind-kubeconfig
|
||||
## Finish KinD setup by installing helm:
|
||||
##
|
||||
## # Finish KinD setup by installing helm:
|
||||
## make push-helm
|
||||
##
|
||||
## Deploy Open Match
|
||||
## # Deploy Open Match
|
||||
## make push-images -j$(nproc)
|
||||
## make install-chart
|
||||
##
|
||||
## Build and Test
|
||||
## # Build and Test
|
||||
## make all -j$(nproc)
|
||||
## make test
|
||||
##
|
||||
## Access telemetry
|
||||
## # Access telemetry
|
||||
## make proxy-prometheus
|
||||
## make proxy-grafana
|
||||
## make proxy-ui
|
||||
##
|
||||
## Teardown
|
||||
## # Teardown
|
||||
## make delete-mini-cluster
|
||||
## make delete-gke-cluster
|
||||
## make delete-kind-cluster && export KUBECONFIG=""
|
||||
##
|
||||
## Prepare a Pull Request
|
||||
## # Prepare a Pull Request
|
||||
## make presubmit
|
||||
##
|
||||
|
||||
# If you want information on how to edit this file checkout,
|
||||
# http://makefiletutorial.com/
|
||||
|
||||
BASE_VERSION = 1.1.0
|
||||
BASE_VERSION = 1.4.0
|
||||
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
|
||||
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
|
||||
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
|
||||
@ -61,14 +62,14 @@ YEAR_MONTH = $(shell date -u +'%Y%m')
|
||||
YEAR_MONTH_DAY = $(shell date -u +'%Y%m%d')
|
||||
MAJOR_MINOR_VERSION = $(shell echo $(BASE_VERSION) | cut -d '.' -f1).$(shell echo $(BASE_VERSION) | cut -d '.' -f2)
|
||||
PROTOC_VERSION = 3.10.1
|
||||
HELM_VERSION = 3.0.0
|
||||
HELM_VERSION = 3.8.0
|
||||
KUBECTL_VERSION = 1.16.2
|
||||
MINIKUBE_VERSION = latest
|
||||
GOLANGCI_VERSION = 1.18.0
|
||||
KIND_VERSION = 0.5.1
|
||||
SWAGGERUI_VERSION = 3.24.2
|
||||
GOOGLE_APIS_VERSION = aba342359b6743353195ca53f944fe71e6fb6cd4
|
||||
GRPC_GATEWAY_VERSION = 1.14.3
|
||||
GRPC_GATEWAY_VERSION = 2.3.0
|
||||
TERRAFORM_VERSION = 0.12.13
|
||||
CHART_TESTING_VERSION = 2.4.0
|
||||
|
||||
@ -187,7 +188,7 @@ else
|
||||
endif
|
||||
endif
|
||||
|
||||
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
|
||||
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go internal/ipb/messages.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
|
||||
|
||||
SWAGGER_JSON_DOCS = api/frontend.swagger.json api/backend.swagger.json api/query.swagger.json api/matchfunction.swagger.json api/evaluator.swagger.json
|
||||
|
||||
@ -197,7 +198,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
|
||||
CMDS = $(notdir $(wildcard cmd/*))
|
||||
|
||||
# Names of the individual images, ommiting the openmatch prefix.
|
||||
IMAGES = $(CMDS) mmf-go-soloduel base-build
|
||||
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-backfill base-build
|
||||
|
||||
help:
|
||||
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
|
||||
@ -209,27 +210,28 @@ local-cloud-build: gcloud
|
||||
################################################################################
|
||||
## #############################################################################
|
||||
## Image commands:
|
||||
## These commands are auto-generated based on a complete list of images. All
|
||||
## folders in cmd/ are turned into an image using Dockerfile.cmd. Additional
|
||||
## images are specified by the IMAGES variable. Image commands ommit the
|
||||
## "openmatch-" prefix on the image name and tags.
|
||||
## These commands are auto-generated based on a complete list of images.
|
||||
## All folders in cmd/ are turned into an image using Dockerfile.cmd.
|
||||
## Additional images are specified by the IMAGES variable.
|
||||
## Image commands ommit the "openmatch-" prefix on the image name and tags.
|
||||
##
|
||||
|
||||
list-images:
|
||||
@echo $(IMAGES)
|
||||
|
||||
#######################################
|
||||
## build-images / build-<image name>-image: builds images locally
|
||||
## # Builds images locally
|
||||
## build-images / build-<image name>-image
|
||||
##
|
||||
build-images: $(foreach IMAGE,$(IMAGES),build-$(IMAGE)-image)
|
||||
|
||||
# Include all-protos here so that all dependencies are guaranteed to be downloaded after the base image is created.
|
||||
# This is important so that the repository does not have any mutations while building individual images.
|
||||
build-base-build-image: docker $(ALL_PROTOS)
|
||||
docker build -f Dockerfile.base-build -t open-match-base-build -t $(REGISTRY)/openmatch-base-build:$(TAG) -t $(REGISTRY)/openmatch-base-build:$(ALTERNATE_TAG) .
|
||||
DOCKER_BUILDKIT=1 docker build -f Dockerfile.base-build -t open-match-base-build -t $(REGISTRY)/openmatch-base-build:$(TAG) -t $(REGISTRY)/openmatch-base-build:$(ALTERNATE_TAG) .
|
||||
|
||||
$(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-build-image
|
||||
docker build \
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
-f Dockerfile.cmd \
|
||||
$(IMAGE_BUILD_ARGS) \
|
||||
--build-arg=IMAGE_TITLE=$* \
|
||||
@ -238,11 +240,14 @@ $(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-buil
|
||||
.
|
||||
|
||||
build-mmf-go-soloduel-image: docker build-base-build-image
|
||||
docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
|
||||
DOCKER_BUILDKIT=1 docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
|
||||
|
||||
build-mmf-go-backfill-image: docker build-base-build-image
|
||||
DOCKER_BUILDKIT=1 docker build -f examples/functions/golang/backfill/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-backfill:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-backfill:$(ALTERNATE_TAG) .
|
||||
|
||||
#######################################
|
||||
## push-images / push-<image name>-image: builds and pushes images to your
|
||||
## container registry.
|
||||
## # Builds and pushes images to your container registry.
|
||||
## push-images / push-<image name>-image
|
||||
##
|
||||
push-images: $(foreach IMAGE,$(IMAGES),push-$(IMAGE)-image)
|
||||
|
||||
@ -261,8 +266,9 @@ endif
|
||||
endif
|
||||
|
||||
#######################################
|
||||
## retag-images / retag-<image name>-image: publishes images on the public
|
||||
## container registry. Used for publishing releases.
|
||||
## # Publishes images on the public container registry.
|
||||
## # Used for publishing releases.
|
||||
## retag-images / retag-<image name>-image
|
||||
##
|
||||
retag-images: $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image)
|
||||
|
||||
@ -275,7 +281,8 @@ $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image): retag-%-image: docker
|
||||
docker push $(TARGET_REGISTRY)/openmatch-$*:$(TAG)
|
||||
|
||||
#######################################
|
||||
## clean-images / clean-<image name>-image: removes images from local docker
|
||||
## # Removes images from local docker
|
||||
## clean-images / clean-<image name>-image
|
||||
##
|
||||
clean-images: docker $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image)
|
||||
-docker rmi -f open-match-base-build
|
||||
@ -285,7 +292,7 @@ $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image): clean-%-image:
|
||||
|
||||
#####################################################################################################################
|
||||
update-chart-deps: build/toolchain/bin/helm$(EXE_EXTENSION)
|
||||
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://charts.helm.sh/stable; $(HELM) dependency update)
|
||||
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://charts.helm.sh/incubator; $(HELM) repo add bitnami https://charts.bitnami.com/bitnami;$(HELM) dependency update)
|
||||
|
||||
lint-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/ct$(EXE_EXTENSION)
|
||||
(cd $(REPOSITORY_ROOT)/install/helm; $(HELM) lint $(OPEN_MATCH_HELM_NAME))
|
||||
@ -298,8 +305,8 @@ build/chart/open-match-$(BASE_VERSION).tgz: build/toolchain/bin/helm$(EXE_EXTENS
|
||||
|
||||
build/chart/index.yaml: build/toolchain/bin/helm$(EXE_EXTENSION) gcloud build/chart/open-match-$(BASE_VERSION).tgz
|
||||
mkdir -p $(BUILD_DIR)/chart-index/
|
||||
-gsutil cp gs://open-match-chart/chart/index.yaml $(BUILD_DIR)/chart-index/
|
||||
-gsutil -m cp gs://open-match-chart/chart/open-match-* $(BUILD_DIR)/chart-index/
|
||||
-gsutil cp $(_CHARTS_BUCKET)/chart/index.yaml $(BUILD_DIR)/chart-index/
|
||||
-gsutil -m cp $(_CHARTS_BUCKET)/chart/open-match-* $(BUILD_DIR)/chart-index/
|
||||
$(HELM) repo index $(BUILD_DIR)/chart-index/
|
||||
$(HELM) repo index --merge $(BUILD_DIR)/chart-index/index.yaml $(BUILD_DIR)/chart/
|
||||
|
||||
@ -313,7 +320,7 @@ install-chart-prerequisite: build/toolchain/bin/kubectl$(EXE_EXTENSION) update-c
|
||||
$(KUBECTL) apply -f install/gke-metadata-server-workaround.yaml
|
||||
|
||||
# Used for Open Match development. Install om-configmap-override.yaml by default.
|
||||
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD)
|
||||
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD) --set redis.auth.enabled=false --set redis.auth.sentinel=false
|
||||
# Used for generate static yamls. Install om-configmap-override.yaml as needed.
|
||||
HELM_TEMPLATE_FLAGS = --no-hooks --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --set usingHelmTemplate=true
|
||||
HELM_IMAGE_FLAGS = --set global.image.registry=$(REGISTRY) --set global.image.tag=$(TAG)
|
||||
@ -357,7 +364,10 @@ install-scale-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EX
|
||||
--set open-match-core.redis.enabled=false \
|
||||
--set global.telemetry.prometheus.enabled=true \
|
||||
--set global.telemetry.grafana.enabled=true \
|
||||
--set open-match-scale.enabled=true | $(KUBECTL) apply -f -
|
||||
--set global.kubernetes.serviceAccount=$(OPEN_MATCH_HELM_NAME)-unprivileged-service \
|
||||
--set open-match-scale.enabled=true \
|
||||
--set open-match-scale.configs.default.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-default\" \}\}" \
|
||||
--set open-match-scale.configs.override.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-override\" \}\}" | $(KUBECTL) apply -f -
|
||||
|
||||
# install-ci-chart will install open-match-core with pool based mmf for end-to-end in-cluster test.
|
||||
install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTENSION) install/helm/open-match/secrets/
|
||||
@ -369,7 +379,7 @@ install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTEN
|
||||
--set open-match-core.registrationInterval=200ms \
|
||||
--set open-match-core.proposalCollectionInterval=200ms \
|
||||
--set open-match-core.assignedDeleteTimeout=200ms \
|
||||
--set open-match-core.pendingReleaseTimeout=200ms \
|
||||
--set open-match-core.pendingReleaseTimeout=1s \
|
||||
--set open-match-core.queryPageSize=10 \
|
||||
--set global.gcpProjectId=intentionally-invalid-value \
|
||||
--set redis.master.resources.requests.cpu=0.6,redis.master.resources.requests.memory=300Mi \
|
||||
@ -468,10 +478,28 @@ set-redis-password:
|
||||
stty echo; \
|
||||
printf "\n"; \
|
||||
$(KUBECTL) create secret generic open-match-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
|
||||
## ####################################
|
||||
## # Tool installation helpers
|
||||
##
|
||||
|
||||
## # Install toolchain. Short for installing K8s, protoc and OpenMatch tools.
|
||||
## make install-toolchain
|
||||
##
|
||||
install-toolchain: install-kubernetes-tools install-protoc-tools install-openmatch-tools
|
||||
|
||||
## # Install Kubernetes tools
|
||||
## make install-kubernetes-tools
|
||||
##
|
||||
install-kubernetes-tools: build/toolchain/bin/kubectl$(EXE_EXTENSION) build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/minikube$(EXE_EXTENSION) build/toolchain/bin/terraform$(EXE_EXTENSION)
|
||||
install-protoc-tools: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION)
|
||||
|
||||
## # Install protoc tools
|
||||
## make install-protoc-tools
|
||||
##
|
||||
install-protoc-tools: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION)
|
||||
|
||||
## # Install OpenMatch tools
|
||||
## make install-openmatch-tools
|
||||
##
|
||||
install-openmatch-tools: build/toolchain/bin/certgen$(EXE_EXTENSION) build/toolchain/bin/reaper$(EXE_EXTENSION)
|
||||
|
||||
build/toolchain/bin/helm$(EXE_EXTENSION):
|
||||
@ -543,11 +571,11 @@ build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION):
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/golang/protobuf/protoc-gen-go
|
||||
|
||||
build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION):
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway
|
||||
|
||||
build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION):
|
||||
build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION):
|
||||
mkdir -p $(TOOLCHAIN_BIN)
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2
|
||||
|
||||
build/toolchain/bin/certgen$(EXE_EXTENSION):
|
||||
mkdir -p $(TOOLCHAIN_BIN)
|
||||
@ -608,15 +636,16 @@ delete-kind-cluster: build/toolchain/bin/kind$(EXE_EXTENSION) build/toolchain/bi
|
||||
create-cluster-role-binding:
|
||||
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
|
||||
|
||||
create-gke-cluster: GKE_VERSION = 1.15.12-gke.20 # gcloud beta container get-server-config --zone us-west1-a
|
||||
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-4 --enable-autoscaling --min-nodes 1 --num-nodes 2 --max-nodes 10 --disk-size 50
|
||||
create-gke-cluster: GKE_VERSION = 1.20.8-gke.900 # gcloud beta container get-server-config --zone us-west1-a
|
||||
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-8 --enable-autoscaling --min-nodes 1 --num-nodes 6 --max-nodes 10 --disk-size 50
|
||||
create-gke-cluster: GKE_FUTURE_COMPAT_FLAGS = --no-enable-basic-auth --no-issue-client-certificate --enable-ip-alias --metadata disable-legacy-endpoints=true --enable-autoupgrade
|
||||
create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
|
||||
$(GCLOUD) beta $(GCP_PROJECT_FLAG) container clusters create $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GKE_CLUSTER_SHAPE_FLAGS) $(GKE_FUTURE_COMPAT_FLAGS) $(GKE_CLUSTER_FLAGS) \
|
||||
--enable-pod-security-policy \
|
||||
--cluster-version $(GKE_VERSION) \
|
||||
--image-type cos_containerd \
|
||||
--tags open-match
|
||||
--tags open-match \
|
||||
--workload-pool $(PROJECT_ID).svc.id.goog
|
||||
$(MAKE) create-cluster-role-binding
|
||||
|
||||
|
||||
@ -632,12 +661,19 @@ delete-mini-cluster: build/toolchain/bin/minikube$(EXE_EXTENSION)
|
||||
gcp-apply-binauthz-policy: build/policies/binauthz.yaml
|
||||
$(GCLOUD) beta $(GCP_PROJECT_FLAG) container binauthz policy import build/policies/binauthz.yaml
|
||||
|
||||
## ####################################
|
||||
## # Protobuf
|
||||
##
|
||||
|
||||
## # Build all protobuf definitions.
|
||||
## make all-protos
|
||||
##
|
||||
all-protos: $(ALL_PROTOS)
|
||||
|
||||
# The proto generator really wants to be run from the $GOPATH root, and doesn't
|
||||
# support methods for directing it to the correct location that's not the proto
|
||||
# file's location. So instead put it in a tempororary directory, then move it
|
||||
# out.
|
||||
# file's location.
|
||||
# So, instead, put it in a tempororary directory, then move it out.
|
||||
pkg/pb/%.pb.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
|
||||
mkdir -p $(REPOSITORY_ROOT)/build/prototmp $(REPOSITORY_ROOT)/pkg/pb
|
||||
$(PROTOC) $< \
|
||||
@ -659,11 +695,15 @@ pkg/pb/%.pb.gw.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTE
|
||||
--grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)/build/prototmp
|
||||
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/$@ $@
|
||||
|
||||
api/%.swagger.json: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION)
|
||||
api/%.swagger.json: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION)
|
||||
$(PROTOC) $< \
|
||||
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
|
||||
--swagger_out=logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)
|
||||
--openapiv2_out=json_names_for_fields=false,logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)
|
||||
|
||||
|
||||
## # Build API reference in markdown. Needs open-match-docs repo at the same level as this one.
|
||||
## make api/api.md
|
||||
##
|
||||
api/api.md: third_party/ build/toolchain/bin/protoc-gen-doc$(EXE_EXTENSION)
|
||||
$(PROTOC) api/*.proto \
|
||||
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
|
||||
@ -681,7 +721,15 @@ pkg/pb/matchfunction.pb.go: pkg/pb/messages.pb.go
|
||||
pkg/pb/query.pb.go: pkg/pb/messages.pb.go
|
||||
pkg/pb/evaluator.pb.go: pkg/pb/messages.pb.go
|
||||
internal/ipb/synchronizer.pb.go: pkg/pb/messages.pb.go
|
||||
internal/ipb/messages.pb.go: pkg/pb/messages.pb.go
|
||||
|
||||
## ####################################
|
||||
## # Go tasks
|
||||
##
|
||||
|
||||
## # Build assets and binaries
|
||||
## make build
|
||||
##
|
||||
build: assets
|
||||
$(GO) build ./...
|
||||
$(GO) build -tags e2ecluster ./...
|
||||
@ -703,9 +751,15 @@ define fast_test_folder
|
||||
$(foreach dir, $(wildcard $(1)/*/.), $(call fast_test_folder, $(dir)))
|
||||
endef
|
||||
|
||||
## # Run go tests
|
||||
## make test
|
||||
##
|
||||
test: $(ALL_PROTOS) tls-certs third_party/
|
||||
$(call test_folder,.)
|
||||
|
||||
## # Run go tests more quickly, but with worse flake and race detection
|
||||
## make fasttest
|
||||
##
|
||||
fasttest: $(ALL_PROTOS) tls-certs third_party/
|
||||
$(call fast_test_folder,.)
|
||||
|
||||
@ -722,6 +776,9 @@ vet:
|
||||
golangci: build/toolchain/bin/golangci-lint$(EXE_EXTENSION)
|
||||
GO111MODULE=on $(GOLANGCI) run --config=$(REPOSITORY_ROOT)/.golangci.yaml
|
||||
|
||||
## # Run linter on Go code, charts and terraform
|
||||
## make lint
|
||||
##
|
||||
lint: fmt vet golangci lint-chart terraform-lint
|
||||
|
||||
assets: $(ALL_PROTOS) tls-certs third_party/ build/chart/
|
||||
@ -736,7 +793,7 @@ $(foreach CMD,$(CMDS),build/cmd/$(CMD)): build/cmd/%: build/cmd/%/BUILD_PHONY bu
|
||||
|
||||
build/cmd/%/BUILD_PHONY:
|
||||
mkdir -p $(BUILD_DIR)/cmd/$*
|
||||
CGO_ENABLED=0 $(GO) build -a -installsuffix cgo -o $(BUILD_DIR)/cmd/$*/run open-match.dev/open-match/cmd/$*
|
||||
CGO_ENABLED=0 $(GO) build -v -installsuffix cgo -o $(BUILD_DIR)/cmd/$*/run open-match.dev/open-match/cmd/$*
|
||||
|
||||
# Default is that nothing needs to be copied into the direcotry
|
||||
build/cmd/%/COPY_PHONY:
|
||||
@ -792,13 +849,13 @@ md-test: docker
|
||||
|
||||
ci-deploy-artifacts: install/yaml/ $(SWAGGER_JSON_DOCS) build/chart/ gcloud
|
||||
ifeq ($(_GCB_POST_SUBMIT),1)
|
||||
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* gs://open-match-chart/install/v$(BASE_VERSION)/yaml/
|
||||
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json gs://open-match-chart/api/v$(BASE_VERSION)/
|
||||
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* $(_CHARTS_BUCKET)/install/v$(BASE_VERSION)/yaml/
|
||||
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json $(_CHARTS_BUCKET)/api/v$(BASE_VERSION)/
|
||||
# Deploy Helm Chart
|
||||
# Since each build will refresh just it's version we can allow this for every post submit.
|
||||
# Copy the files into multiple locations to keep a backup.
|
||||
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/by-hash/$(VERSION)/
|
||||
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/
|
||||
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/by-hash/$(VERSION)/
|
||||
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/
|
||||
else
|
||||
@echo "Not deploying build artifacts to open-match.dev because this is not a post commit change."
|
||||
endif
|
||||
@ -927,7 +984,7 @@ proxy:
|
||||
update-deps:
|
||||
$(GO) mod tidy
|
||||
|
||||
third_party/: third_party/google/api third_party/protoc-gen-swagger/options third_party/swaggerui/
|
||||
third_party/: third_party/google/api third_party/protoc-gen-openapiv2/options third_party/swaggerui/
|
||||
|
||||
third_party/google/api:
|
||||
mkdir -p $(TOOLCHAIN_DIR)/googleapis-temp/
|
||||
@ -939,12 +996,12 @@ third_party/google/api:
|
||||
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
|
||||
rm -rf $(TOOLCHAIN_DIR)/googleapis-temp
|
||||
|
||||
third_party/protoc-gen-swagger/options:
|
||||
third_party/protoc-gen-openapiv2/options:
|
||||
mkdir -p $(TOOLCHAIN_DIR)/grpc-gateway-temp/
|
||||
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options
|
||||
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-openapiv2/options
|
||||
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/v$(GRPC_GATEWAY_VERSION).zip
|
||||
(cd $(TOOLCHAIN_DIR)/grpc-gateway-temp/; unzip -q -o grpc-gateway.zip)
|
||||
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
|
||||
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-openapiv2/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-openapiv2/options/
|
||||
rm -rf $(TOOLCHAIN_DIR)/grpc-gateway-temp
|
||||
|
||||
third_party/swaggerui/:
|
||||
|
@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
|
||||
|
||||
import "api/messages.proto";
|
||||
import "google/api/annotations.proto";
|
||||
import "protoc-gen-swagger/options/annotations.proto";
|
||||
import "protoc-gen-openapiv2/options/annotations.proto";
|
||||
|
||||
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
|
||||
info: {
|
||||
title: "Backend"
|
||||
version: "1.0"
|
||||
@ -93,7 +93,7 @@ message ReleaseAllTicketsRequest{}
|
||||
message ReleaseAllTicketsResponse {}
|
||||
|
||||
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
|
||||
message AssignmentGroup{
|
||||
message AssignmentGroup {
|
||||
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
|
||||
repeated string ticket_ids = 1;
|
||||
|
||||
@ -146,7 +146,6 @@ service BackendService {
|
||||
|
||||
// ReleaseTickets moves tickets from the pending state, to the active state.
|
||||
// This enables them to be returned by query, and find different matches.
|
||||
//
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc ReleaseTickets(ReleaseTicketsRequest) returns (ReleaseTicketsResponse) {
|
||||
@ -159,7 +158,6 @@ service BackendService {
|
||||
// ReleaseAllTickets moves all tickets from the pending state, to the active
|
||||
// state. This enables them to be returned by query, and find different
|
||||
// matches.
|
||||
//
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc ReleaseAllTickets(ReleaseAllTicketsRequest) returns (ReleaseAllTicketsResponse) {
|
||||
|
@ -13,6 +13,11 @@
|
||||
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
{
|
||||
"name": "BackendService"
|
||||
}
|
||||
],
|
||||
"schemes": [
|
||||
"http",
|
||||
"https"
|
||||
@ -27,12 +32,21 @@
|
||||
"/v1/backendservice/matches:fetch": {
|
||||
"post": {
|
||||
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and\nreturns a set of matches generated by the Match Making Function, and\naccepted by the evaluator.\nTickets in matches returned by FetchMatches are moved from active to\npending, and will not be returned by query.",
|
||||
"operationId": "FetchMatches",
|
||||
"operationId": "BackendService_FetchMatches",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.(streaming responses)",
|
||||
"schema": {
|
||||
"$ref": "#/x-stream-definitions/openmatchFetchMatchesResponse"
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchFetchMatchesResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchFetchMatchesResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
@ -41,6 +55,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -61,7 +81,7 @@
|
||||
"/v1/backendservice/tickets:assign": {
|
||||
"post": {
|
||||
"summary": "AssignTickets overwrites the Assignment field of the input TicketIds.",
|
||||
"operationId": "AssignTickets",
|
||||
"operationId": "BackendService_AssignTickets",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
@ -75,6 +95,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -94,9 +120,8 @@
|
||||
},
|
||||
"/v1/backendservice/tickets:release": {
|
||||
"post": {
|
||||
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.",
|
||||
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "ReleaseTickets",
|
||||
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "BackendService_ReleaseTickets",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
@ -110,6 +135,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -129,9 +160,8 @@
|
||||
},
|
||||
"/v1/backendservice/tickets:releaseall": {
|
||||
"post": {
|
||||
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.",
|
||||
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "ReleaseAllTickets",
|
||||
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "BackendService_ReleaseAllTickets",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
@ -145,6 +175,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -172,6 +208,17 @@
|
||||
],
|
||||
"default": "UNKNOWN"
|
||||
},
|
||||
"DoubleRangeFilterExclude": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"MIN",
|
||||
"MAX",
|
||||
"BOTH"
|
||||
],
|
||||
"default": "NONE",
|
||||
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
|
||||
},
|
||||
"openmatchAssignTicketsRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -242,6 +289,37 @@
|
||||
},
|
||||
"description": "AssignmentGroup contains an Assignment and the Tickets to which it should be applied."
|
||||
},
|
||||
"openmatchBackfill": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Id represents an auto-generated Id issued by Open Match."
|
||||
},
|
||||
"search_fields": {
|
||||
"$ref": "#/definitions/openmatchSearchFields",
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
|
||||
},
|
||||
"extensions": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"create_time": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
|
||||
},
|
||||
"generation": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
|
||||
}
|
||||
},
|
||||
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
|
||||
},
|
||||
"openmatchDoubleRangeFilter": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -258,6 +336,10 @@
|
||||
"type": "number",
|
||||
"format": "double",
|
||||
"description": "Minimum value."
|
||||
},
|
||||
"exclude": {
|
||||
"$ref": "#/definitions/DoubleRangeFilterExclude",
|
||||
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
|
||||
}
|
||||
},
|
||||
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
|
||||
@ -336,6 +418,14 @@
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"backfill": {
|
||||
"$ref": "#/definitions/openmatchBackfill",
|
||||
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
|
||||
},
|
||||
"allocate_gameserver": {
|
||||
"type": "boolean",
|
||||
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
|
||||
}
|
||||
},
|
||||
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
|
||||
@ -519,44 +609,27 @@
|
||||
},
|
||||
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
|
||||
},
|
||||
"runtimeStreamError": {
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"grpc_code": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"http_code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
"format": "int32",
|
||||
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"http_status": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"x-stream-definitions": {
|
||||
"openmatchFetchMatchesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchFetchMatchesResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/runtimeStreamError"
|
||||
},
|
||||
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchFetchMatchesResponse"
|
||||
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
|
||||
}
|
||||
},
|
||||
"externalDocs": {
|
||||
|
@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
|
||||
|
||||
import "api/messages.proto";
|
||||
import "google/api/annotations.proto";
|
||||
import "protoc-gen-swagger/options/annotations.proto";
|
||||
import "protoc-gen-openapiv2/options/annotations.proto";
|
||||
|
||||
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
|
||||
info: {
|
||||
title: "Evaluator"
|
||||
version: "1.0"
|
||||
@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
}
|
||||
// TODO Add annotations for security_defintiions.
|
||||
// See
|
||||
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
|
||||
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
|
||||
};
|
||||
|
||||
message EvaluateRequest {
|
||||
|
@ -13,6 +13,11 @@
|
||||
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
{
|
||||
"name": "Evaluator"
|
||||
}
|
||||
],
|
||||
"schemes": [
|
||||
"http",
|
||||
"https"
|
||||
@ -27,12 +32,21 @@
|
||||
"/v1/evaluator/matches:evaluate": {
|
||||
"post": {
|
||||
"summary": "Evaluate evaluates a list of proposed matches based on quality, collision status, and etc, then shortlist the matches and returns the final results.",
|
||||
"operationId": "Evaluate",
|
||||
"operationId": "Evaluator_Evaluate",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.(streaming responses)",
|
||||
"schema": {
|
||||
"$ref": "#/x-stream-definitions/openmatchEvaluateResponse"
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchEvaluateResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchEvaluateResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
@ -41,6 +55,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -78,6 +98,37 @@
|
||||
},
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
|
||||
},
|
||||
"openmatchBackfill": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Id represents an auto-generated Id issued by Open Match."
|
||||
},
|
||||
"search_fields": {
|
||||
"$ref": "#/definitions/openmatchSearchFields",
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
|
||||
},
|
||||
"extensions": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"create_time": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
|
||||
},
|
||||
"generation": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
|
||||
}
|
||||
},
|
||||
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
|
||||
},
|
||||
"openmatchEvaluateRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -124,6 +175,14 @@
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"backfill": {
|
||||
"$ref": "#/definitions/openmatchBackfill",
|
||||
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
|
||||
},
|
||||
"allocate_gameserver": {
|
||||
"type": "boolean",
|
||||
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
|
||||
}
|
||||
},
|
||||
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
|
||||
@ -201,44 +260,27 @@
|
||||
},
|
||||
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
|
||||
},
|
||||
"runtimeStreamError": {
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"grpc_code": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"http_code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
"format": "int32",
|
||||
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"http_status": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"x-stream-definitions": {
|
||||
"openmatchEvaluateResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchEvaluateResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/runtimeStreamError"
|
||||
},
|
||||
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchEvaluateResponse"
|
||||
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
|
||||
}
|
||||
},
|
||||
"externalDocs": {
|
||||
|
@ -19,10 +19,10 @@ option csharp_namespace = "OpenMatch";
|
||||
|
||||
import "api/messages.proto";
|
||||
import "google/api/annotations.proto";
|
||||
import "protoc-gen-swagger/options/annotations.proto";
|
||||
import "protoc-gen-openapiv2/options/annotations.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
|
||||
info: {
|
||||
title: "Frontend"
|
||||
version: "1.0"
|
||||
@ -53,7 +53,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
}
|
||||
// TODO Add annotations for security_defintiions.
|
||||
// See
|
||||
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
|
||||
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
|
||||
};
|
||||
|
||||
message CreateTicketRequest {
|
||||
@ -81,6 +81,57 @@ message WatchAssignmentsResponse {
|
||||
Assignment assignment = 1;
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request message is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
message AcknowledgeBackfillRequest {
|
||||
// An existing ID of Backfill to acknowledge.
|
||||
string backfill_id = 1;
|
||||
|
||||
// An updated Assignment of the requested Backfill.
|
||||
Assignment assignment = 2;
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request message is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
message AcknowledgeBackfillResponse {
|
||||
// The Backfill that was acknowledged.
|
||||
Backfill backfill = 1;
|
||||
|
||||
// All of the Tickets that were successfully assigned
|
||||
repeated Ticket tickets = 2;
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request message is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
message CreateBackfillRequest {
|
||||
// An empty Backfill object.
|
||||
Backfill backfill = 1;
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request message is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
message DeleteBackfillRequest {
|
||||
// An existing ID of Backfill to delete.
|
||||
string backfill_id = 1;
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request message is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
message GetBackfillRequest {
|
||||
// An existing ID of Backfill to retrieve.
|
||||
string backfill_id = 1;
|
||||
}
|
||||
|
||||
// UpdateBackfillRequest - update searchFields, extensions and set assignment.
|
||||
//
|
||||
// BETA FEATURE WARNING: This Request message is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
message UpdateBackfillRequest {
|
||||
// A Backfill object with ID set and fields to update.
|
||||
Backfill backfill = 1;
|
||||
}
|
||||
|
||||
|
||||
// The FrontendService implements APIs to manage and query status of a Tickets.
|
||||
service FrontendService {
|
||||
// CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.
|
||||
@ -117,4 +168,55 @@ service FrontendService {
|
||||
get: "/v1/frontendservice/tickets/{ticket_id}/assignments"
|
||||
};
|
||||
}
|
||||
|
||||
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info
|
||||
// This triggers an assignment process.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc AcknowledgeBackfill(AcknowledgeBackfillRequest) returns (AcknowledgeBackfillResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/frontendservice/backfills/{backfill_id}/acknowledge"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// CreateBackfill creates a new Backfill object.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc CreateBackfill(CreateBackfillRequest) returns (Backfill) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/frontendservice/backfills"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// DeleteBackfill receives a backfill ID and deletes its resource.
|
||||
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc DeleteBackfill(DeleteBackfillRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
delete: "/v1/frontendservice/backfills/{backfill_id}"
|
||||
};
|
||||
}
|
||||
|
||||
// GetBackfill returns a backfill object by its ID.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc GetBackfill(GetBackfillRequest) returns (Backfill) {
|
||||
option (google.api.http) = {
|
||||
get: "/v1/frontendservice/backfills/{backfill_id}"
|
||||
};
|
||||
}
|
||||
|
||||
// UpdateBackfill updates search_fields and extensions for the backfill with the provided id.
|
||||
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc UpdateBackfill(UpdateBackfillRequest) returns (Backfill) {
|
||||
option (google.api.http) = {
|
||||
patch: "/v1/frontendservice/backfills"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -13,6 +13,11 @@
|
||||
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
{
|
||||
"name": "FrontendService"
|
||||
}
|
||||
],
|
||||
"schemes": [
|
||||
"http",
|
||||
"https"
|
||||
@ -24,10 +29,211 @@
|
||||
"application/json"
|
||||
],
|
||||
"paths": {
|
||||
"/v1/frontendservice/backfills": {
|
||||
"post": {
|
||||
"summary": "CreateBackfill creates a new Backfill object.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "FrontendService_CreateBackfill",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchBackfill"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchCreateBackfillRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"FrontendService"
|
||||
]
|
||||
},
|
||||
"patch": {
|
||||
"summary": "UpdateBackfill updates search_fields and extensions for the backfill with the provided id.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "FrontendService_UpdateBackfill",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchBackfill"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchUpdateBackfillRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"FrontendService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/frontendservice/backfills/{backfill_id}": {
|
||||
"get": {
|
||||
"summary": "GetBackfill returns a backfill object by its ID.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "FrontendService_GetBackfill",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchBackfill"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "backfill_id",
|
||||
"description": "An existing ID of Backfill to retrieve.",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"FrontendService"
|
||||
]
|
||||
},
|
||||
"delete": {
|
||||
"summary": "DeleteBackfill receives a backfill ID and deletes its resource.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "FrontendService_DeleteBackfill",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"properties": {}
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "backfill_id",
|
||||
"description": "An existing ID of Backfill to delete.",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"FrontendService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/frontendservice/backfills/{backfill_id}/acknowledge": {
|
||||
"post": {
|
||||
"summary": "AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info\nThis triggers an assignment process.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "FrontendService_AcknowledgeBackfill",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchAcknowledgeBackfillResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "backfill_id",
|
||||
"description": "An existing ID of Backfill to acknowledge.",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchAcknowledgeBackfillRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"FrontendService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/frontendservice/tickets": {
|
||||
"post": {
|
||||
"summary": "CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.\nA ticket is considered as ready for matchmaking once it is created.\n - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.\n - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.",
|
||||
"operationId": "CreateTicket",
|
||||
"operationId": "FrontendService_CreateTicket",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
@ -41,6 +247,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -61,7 +273,7 @@
|
||||
"/v1/frontendservice/tickets/{ticket_id}": {
|
||||
"get": {
|
||||
"summary": "GetTicket get the Ticket associated with the specified TicketId.",
|
||||
"operationId": "GetTicket",
|
||||
"operationId": "FrontendService_GetTicket",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
@ -75,6 +287,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -92,7 +310,7 @@
|
||||
},
|
||||
"delete": {
|
||||
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client should delete the Ticket when finished matchmaking with it.",
|
||||
"operationId": "DeleteTicket",
|
||||
"operationId": "FrontendService_DeleteTicket",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
@ -106,6 +324,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -125,12 +349,21 @@
|
||||
"/v1/frontendservice/tickets/{ticket_id}/assignments": {
|
||||
"get": {
|
||||
"summary": "WatchAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
|
||||
"operationId": "WatchAssignments",
|
||||
"operationId": "FrontendService_WatchAssignments",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.(streaming responses)",
|
||||
"schema": {
|
||||
"$ref": "#/x-stream-definitions/openmatchWatchAssignmentsResponse"
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchWatchAssignmentsResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
@ -139,6 +372,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -157,6 +396,37 @@
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"openmatchAcknowledgeBackfillRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"backfill_id": {
|
||||
"type": "string",
|
||||
"description": "An existing ID of Backfill to acknowledge."
|
||||
},
|
||||
"assignment": {
|
||||
"$ref": "#/definitions/openmatchAssignment",
|
||||
"description": "An updated Assignment of the requested Backfill."
|
||||
}
|
||||
},
|
||||
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
|
||||
},
|
||||
"openmatchAcknowledgeBackfillResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"backfill": {
|
||||
"$ref": "#/definitions/openmatchBackfill",
|
||||
"description": "The Backfill that was acknowledged."
|
||||
},
|
||||
"tickets": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/openmatchTicket"
|
||||
},
|
||||
"title": "All of the Tickets that were successfully assigned"
|
||||
}
|
||||
},
|
||||
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
|
||||
},
|
||||
"openmatchAssignment": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -174,6 +444,47 @@
|
||||
},
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
|
||||
},
|
||||
"openmatchBackfill": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Id represents an auto-generated Id issued by Open Match."
|
||||
},
|
||||
"search_fields": {
|
||||
"$ref": "#/definitions/openmatchSearchFields",
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
|
||||
},
|
||||
"extensions": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"create_time": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
|
||||
},
|
||||
"generation": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
|
||||
}
|
||||
},
|
||||
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
|
||||
},
|
||||
"openmatchCreateBackfillRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"backfill": {
|
||||
"$ref": "#/definitions/openmatchBackfill",
|
||||
"description": "An empty Backfill object."
|
||||
}
|
||||
},
|
||||
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
|
||||
},
|
||||
"openmatchCreateTicketRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -241,6 +552,16 @@
|
||||
},
|
||||
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
|
||||
},
|
||||
"openmatchUpdateBackfillRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"backfill": {
|
||||
"$ref": "#/definitions/openmatchBackfill",
|
||||
"description": "A Backfill object with ID set and fields to update."
|
||||
}
|
||||
},
|
||||
"description": "UpdateBackfillRequest - update searchFields, extensions and set assignment.\n\nBETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
|
||||
},
|
||||
"openmatchWatchAssignmentsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -265,44 +586,27 @@
|
||||
},
|
||||
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
|
||||
},
|
||||
"runtimeStreamError": {
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"grpc_code": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"http_code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
"format": "int32",
|
||||
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"http_status": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"x-stream-definitions": {
|
||||
"openmatchWatchAssignmentsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/runtimeStreamError"
|
||||
},
|
||||
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchWatchAssignmentsResponse"
|
||||
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
|
||||
}
|
||||
},
|
||||
"externalDocs": {
|
||||
|
@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
|
||||
|
||||
import "api/messages.proto";
|
||||
import "google/api/annotations.proto";
|
||||
import "protoc-gen-swagger/options/annotations.proto";
|
||||
import "protoc-gen-openapiv2/options/annotations.proto";
|
||||
|
||||
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
|
||||
info: {
|
||||
title: "Match Function"
|
||||
version: "1.0"
|
||||
@ -69,8 +69,9 @@ message RunResponse {
|
||||
// The MatchFunction service implements APIs to run user-defined matchmaking logics.
|
||||
service MatchFunction {
|
||||
// DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.
|
||||
// Run pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then
|
||||
// constructs and streams back match candidates to the Backend service.
|
||||
// Run pulls Tickets that satisfy Profile constraints from QueryService,
|
||||
// runs matchmaking logic against them, then constructs and streams back
|
||||
// match candidates to the Backend service.
|
||||
rpc Run(RunRequest) returns (stream RunResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/matchfunction:run"
|
||||
|
@ -13,6 +13,11 @@
|
||||
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
{
|
||||
"name": "MatchFunction"
|
||||
}
|
||||
],
|
||||
"schemes": [
|
||||
"http",
|
||||
"https"
|
||||
@ -26,13 +31,22 @@
|
||||
"paths": {
|
||||
"/v1/matchfunction:run": {
|
||||
"post": {
|
||||
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then\nconstructs and streams back match candidates to the Backend service.",
|
||||
"operationId": "Run",
|
||||
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisfy Profile constraints from QueryService,\nruns matchmaking logic against them, then constructs and streams back\nmatch candidates to the Backend service.",
|
||||
"operationId": "MatchFunction_Run",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.(streaming responses)",
|
||||
"schema": {
|
||||
"$ref": "#/x-stream-definitions/openmatchRunResponse"
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchRunResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchRunResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
@ -41,6 +55,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -60,6 +80,17 @@
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"DoubleRangeFilterExclude": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"MIN",
|
||||
"MAX",
|
||||
"BOTH"
|
||||
],
|
||||
"default": "NONE",
|
||||
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
|
||||
},
|
||||
"openmatchAssignment": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -77,6 +108,37 @@
|
||||
},
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
|
||||
},
|
||||
"openmatchBackfill": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Id represents an auto-generated Id issued by Open Match."
|
||||
},
|
||||
"search_fields": {
|
||||
"$ref": "#/definitions/openmatchSearchFields",
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
|
||||
},
|
||||
"extensions": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"create_time": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
|
||||
},
|
||||
"generation": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
|
||||
}
|
||||
},
|
||||
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
|
||||
},
|
||||
"openmatchDoubleRangeFilter": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -93,6 +155,10 @@
|
||||
"type": "number",
|
||||
"format": "double",
|
||||
"description": "Minimum value."
|
||||
},
|
||||
"exclude": {
|
||||
"$ref": "#/definitions/DoubleRangeFilterExclude",
|
||||
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
|
||||
}
|
||||
},
|
||||
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
|
||||
@ -125,6 +191,14 @@
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"backfill": {
|
||||
"$ref": "#/definitions/openmatchBackfill",
|
||||
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
|
||||
},
|
||||
"allocate_gameserver": {
|
||||
"type": "boolean",
|
||||
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
|
||||
}
|
||||
},
|
||||
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
|
||||
@ -305,44 +379,27 @@
|
||||
},
|
||||
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
|
||||
},
|
||||
"runtimeStreamError": {
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"grpc_code": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"http_code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
"format": "int32",
|
||||
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"http_status": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"x-stream-definitions": {
|
||||
"openmatchRunResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchRunResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/runtimeStreamError"
|
||||
},
|
||||
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchRunResponse"
|
||||
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
|
||||
}
|
||||
},
|
||||
"externalDocs": {
|
||||
|
@ -103,6 +103,25 @@ message DoubleRangeFilter {
|
||||
|
||||
// Minimum value.
|
||||
double min = 3;
|
||||
|
||||
enum Exclude {
|
||||
// No bounds should be excluded when evaluating the filter, i.e.: MIN <= x <= MAX
|
||||
NONE = 0;
|
||||
|
||||
// Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN < x <= MAX
|
||||
MIN = 1;
|
||||
|
||||
// Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN <= x < MAX
|
||||
MAX = 2;
|
||||
|
||||
// Both bounds should be excluded when evaluating the filter, i.e.: MIN < x < MAX
|
||||
BOTH = 3;
|
||||
}
|
||||
|
||||
// Defines the bounds to apply when filtering tickets by their search_fields.double_args value.
|
||||
// BETA FEATURE WARNING: This field and the associated values are
|
||||
// not finalized and still subject to possible change or removal.
|
||||
Exclude exclude = 4;
|
||||
}
|
||||
|
||||
// Filters strings exactly equaling a value.
|
||||
@ -201,6 +220,45 @@ message Match {
|
||||
// Optional, depending on the requirements of the connected systems.
|
||||
map<string, google.protobuf.Any> extensions = 7;
|
||||
|
||||
// Backfill request which contains additional information to the match
|
||||
// and contains an association to a GameServer.
|
||||
// BETA FEATURE WARNING: This field is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
Backfill backfill = 8;
|
||||
|
||||
// AllocateGameServer signalise Director that Backfill is new and it should
|
||||
// allocate a GameServer, this Backfill would be assigned.
|
||||
// BETA FEATURE WARNING: This field is not finalized and still subject
|
||||
// to possible change or removal.
|
||||
bool allocate_gameserver = 9;
|
||||
|
||||
// Deprecated fields.
|
||||
reserved 5, 6;
|
||||
}
|
||||
|
||||
// Represents a backfill entity which is used to fill partially full matches.
|
||||
//
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
message Backfill {
|
||||
// Id represents an auto-generated Id issued by Open Match.
|
||||
string id = 1;
|
||||
|
||||
// Search fields are the fields which Open Match is aware of, and can be used
|
||||
// when specifying filters.
|
||||
SearchFields search_fields = 2;
|
||||
|
||||
// Customized information not inspected by Open Match, to be used by
|
||||
// the Match Function, evaluator, and components making calls to Open Match.
|
||||
// Optional, depending on the requirements of the connected systems.
|
||||
map<string, google.protobuf.Any> extensions = 3;
|
||||
|
||||
// Create time is the time the Ticket was created. It is populated by Open
|
||||
// Match at the time of Ticket creation.
|
||||
google.protobuf.Timestamp create_time = 4;
|
||||
|
||||
// Generation gets incremented on GameServers update operations.
|
||||
// Prevents the MMF from overriding a newer version from the game server.
|
||||
// Do NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs.
|
||||
int64 generation = 5;
|
||||
}
|
@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
|
||||
|
||||
import "api/messages.proto";
|
||||
import "google/api/annotations.proto";
|
||||
import "protoc-gen-swagger/options/annotations.proto";
|
||||
import "protoc-gen-openapiv2/options/annotations.proto";
|
||||
|
||||
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
|
||||
info: {
|
||||
title: "MM Logic (Data Layer)"
|
||||
version: "1.0"
|
||||
@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
|
||||
}
|
||||
// TODO Add annotations for security_defintiions.
|
||||
// See
|
||||
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
|
||||
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
|
||||
};
|
||||
|
||||
message QueryTicketsRequest {
|
||||
@ -75,6 +75,20 @@ message QueryTicketIdsResponse {
|
||||
repeated string ids = 1;
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request messages are not finalized and
|
||||
// still subject to possible change or removal.
|
||||
message QueryBackfillsRequest {
|
||||
// The Pool representing the set of Filters to be queried.
|
||||
Pool pool = 1;
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request messages are not finalized and
|
||||
// still subject to possible change or removal.
|
||||
message QueryBackfillsResponse {
|
||||
// Backfills that meet all the filtering criteria requested by the pool.
|
||||
repeated Backfill backfills = 1;
|
||||
}
|
||||
|
||||
// The QueryService service implements helper APIs for Match Function to query Tickets from state storage.
|
||||
service QueryService {
|
||||
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
|
||||
@ -98,4 +112,14 @@ service QueryService {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// QueryBackfills gets a list of Backfills.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
rpc QueryBackfills(QueryBackfillsRequest) returns (stream QueryBackfillsResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/queryservice/backfills:query"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -13,6 +13,11 @@
|
||||
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
{
|
||||
"name": "QueryService"
|
||||
}
|
||||
],
|
||||
"schemes": [
|
||||
"http",
|
||||
"https"
|
||||
@ -24,15 +29,24 @@
|
||||
"application/json"
|
||||
],
|
||||
"paths": {
|
||||
"/v1/queryservice/ticketids:query": {
|
||||
"/v1/queryservice/backfills:query": {
|
||||
"post": {
|
||||
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
|
||||
"operationId": "QueryTicketIds",
|
||||
"summary": "QueryBackfills gets a list of Backfills.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "QueryService_QueryBackfills",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.(streaming responses)",
|
||||
"schema": {
|
||||
"$ref": "#/x-stream-definitions/openmatchQueryTicketIdsResponse"
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchQueryBackfillsResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchQueryBackfillsResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
@ -41,6 +55,61 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchQueryBackfillsRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"QueryService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/queryservice/ticketids:query": {
|
||||
"post": {
|
||||
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
|
||||
"operationId": "QueryService_QueryTicketIds",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.(streaming responses)",
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchQueryTicketIdsResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -61,12 +130,21 @@
|
||||
"/v1/queryservice/tickets:query": {
|
||||
"post": {
|
||||
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
|
||||
"operationId": "QueryTickets",
|
||||
"operationId": "QueryService_QueryTickets",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.(streaming responses)",
|
||||
"schema": {
|
||||
"$ref": "#/x-stream-definitions/openmatchQueryTicketsResponse"
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchQueryTicketsResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchQueryTicketsResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
@ -75,6 +153,12 @@
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
@ -94,6 +178,17 @@
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"DoubleRangeFilterExclude": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NONE",
|
||||
"MIN",
|
||||
"MAX",
|
||||
"BOTH"
|
||||
],
|
||||
"default": "NONE",
|
||||
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
|
||||
},
|
||||
"openmatchAssignment": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -111,6 +206,37 @@
|
||||
},
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
|
||||
},
|
||||
"openmatchBackfill": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Id represents an auto-generated Id issued by Open Match."
|
||||
},
|
||||
"search_fields": {
|
||||
"$ref": "#/definitions/openmatchSearchFields",
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
|
||||
},
|
||||
"extensions": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"create_time": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
|
||||
},
|
||||
"generation": {
|
||||
"type": "string",
|
||||
"format": "int64",
|
||||
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
|
||||
}
|
||||
},
|
||||
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
|
||||
},
|
||||
"openmatchDoubleRangeFilter": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -127,6 +253,10 @@
|
||||
"type": "number",
|
||||
"format": "double",
|
||||
"description": "Minimum value."
|
||||
},
|
||||
"exclude": {
|
||||
"$ref": "#/definitions/DoubleRangeFilterExclude",
|
||||
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
|
||||
}
|
||||
},
|
||||
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
|
||||
@ -170,6 +300,29 @@
|
||||
},
|
||||
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
|
||||
},
|
||||
"openmatchQueryBackfillsRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"pool": {
|
||||
"$ref": "#/definitions/openmatchPool",
|
||||
"description": "The Pool representing the set of Filters to be queried."
|
||||
}
|
||||
},
|
||||
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
|
||||
},
|
||||
"openmatchQueryBackfillsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"backfills": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/openmatchBackfill"
|
||||
},
|
||||
"description": "Backfills that meet all the filtering criteria requested by the pool."
|
||||
}
|
||||
},
|
||||
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
|
||||
},
|
||||
"openmatchQueryTicketIdsRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -307,56 +460,27 @@
|
||||
},
|
||||
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. 
Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
|
||||
},
|
||||
"runtimeStreamError": {
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"grpc_code": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"http_code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
"format": "int32",
|
||||
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"http_status": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"x-stream-definitions": {
|
||||
"openmatchQueryTicketIdsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/runtimeStreamError"
|
||||
},
|
||||
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchQueryTicketIdsResponse"
|
||||
},
|
||||
"openmatchQueryTicketsResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"result": {
|
||||
"$ref": "#/definitions/openmatchQueryTicketsResponse"
|
||||
},
|
||||
"error": {
|
||||
"$ref": "#/definitions/runtimeStreamError"
|
||||
}
|
||||
},
|
||||
"title": "Stream result of openmatchQueryTicketsResponse"
|
||||
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
|
||||
}
|
||||
},
|
||||
"externalDocs": {
|
||||
|
@ -90,7 +90,7 @@ steps:
|
||||
|
||||
- id: 'Build: Assets'
|
||||
name: 'gcr.io/$PROJECT_ID/open-match-build'
|
||||
args: ['make', 'assets', '-j12']
|
||||
args: ['make', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'assets', '-j12']
|
||||
volumes:
|
||||
- name: 'go-vol'
|
||||
path: '/go'
|
||||
@ -106,7 +106,7 @@ steps:
|
||||
|
||||
- id: 'Test: Services'
|
||||
name: 'gcr.io/$PROJECT_ID/open-match-build'
|
||||
args: ['make', 'GOLANG_TEST_COUNT=10', 'test']
|
||||
args: ['make', 'GOPROXY=off', 'GOLANG_TEST_COUNT=10', 'test']
|
||||
volumes:
|
||||
- name: 'go-vol'
|
||||
path: '/go'
|
||||
@ -132,7 +132,7 @@ steps:
|
||||
|
||||
- id: 'Deploy: Deployment Configs'
|
||||
name: 'gcr.io/$PROJECT_ID/open-match-build'
|
||||
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', 'ci-deploy-artifacts']
|
||||
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'ci-deploy-artifacts']
|
||||
waitFor: ['Lint: Format, Vet, Charts', 'Test: Deploy Open Match']
|
||||
volumes:
|
||||
- name: 'go-vol'
|
||||
@ -164,11 +164,12 @@ artifacts:
|
||||
- install/yaml/06-open-match-override-configmap.yaml
|
||||
|
||||
substitutions:
|
||||
_OM_VERSION: "1.1.0"
|
||||
_OM_VERSION: "1.4.0"
|
||||
_GCB_POST_SUBMIT: "0"
|
||||
_GCB_LATEST_VERSION: "undefined"
|
||||
_ARTIFACTS_BUCKET: "gs://open-match-build-artifacts/output/"
|
||||
_LOGS_BUCKET: "gs://open-match-build-logs/"
|
||||
_CHARTS_BUCKET: "gs://open-match-chart"
|
||||
logsBucket: '${_LOGS_BUCKET}'
|
||||
options:
|
||||
sourceProvenanceHash: ['SHA256']
|
||||
|
@ -46,7 +46,7 @@ make
|
||||
|
||||
*Typically for contributing you'll want to
|
||||
[create a fork](https://help.github.com/en/articles/fork-a-repo) and use that
|
||||
but for purpose of this guide we'll be using the upstream/master.*
|
||||
but for purpose of this guide we'll be using the upstream/main.*
|
||||
|
||||
## Building code and images
|
||||
|
||||
|
@ -37,7 +37,7 @@ func New() *ByteSub {
|
||||
}
|
||||
}
|
||||
|
||||
// AnnounceLatest writes b to all of the subscribers, with caviets listed in Subscribe.
|
||||
// AnnounceLatest writes b to all of the subscribers, with caveats listed in Subscribe.
|
||||
func (s *ByteSub) AnnounceLatest(b []byte) {
|
||||
s.r.Lock()
|
||||
defer s.r.Unlock()
|
||||
|
@ -51,7 +51,7 @@ func TestFastAndSlow(t *testing.T) {
|
||||
for count := 0; true; count++ {
|
||||
if v := <-slow; v == "3" {
|
||||
if count > 1 {
|
||||
t.Error("Expected to recieve at most 1 other value on slow before recieving the latest value.")
|
||||
t.Error("Expected to receive at most 1 other value on slow before receiving the latest value.")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
@ -37,7 +37,7 @@ type Updater struct {
|
||||
type SetFunc func(v interface{})
|
||||
|
||||
// New creates an Updater. Set is called when fields update, using the json
|
||||
// sererialized value of Updater's tree. All updates after ctx is canceled are
|
||||
// serialized value of Updater's tree. All updates after ctx is canceled are
|
||||
// ignored.
|
||||
func New(ctx context.Context, set func([]byte)) *Updater {
|
||||
f := func(v interface{}) {
|
||||
|
24
examples/functions/golang/backfill/Dockerfile
Normal file
24
examples/functions/golang/backfill/Dockerfile
Normal file
@ -0,0 +1,24 @@
|
||||
# Copyright 2020 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM open-match-base-build as builder
|
||||
|
||||
WORKDIR /go/src/open-match.dev/open-match/examples/functions/golang/backfill
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o matchfunction .
|
||||
|
||||
FROM gcr.io/distroless/static:nonroot
|
||||
WORKDIR /app/
|
||||
COPY --from=builder --chown=nonroot /go/src/open-match.dev/open-match/examples/functions/golang/backfill/matchfunction /app/
|
||||
|
||||
ENTRYPOINT ["/app/matchfunction"]
|
33
examples/functions/golang/backfill/main.go
Normal file
33
examples/functions/golang/backfill/main.go
Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package main defines a sample match function that uses the GRPC harness to set up
|
||||
// the match making function as a service. This sample is a reference
|
||||
// to demonstrate the usage of the GRPC harness and should only be used as
|
||||
// a starting point for your match function. You will need to modify the
|
||||
// matchmaking logic in this function based on your game's requirements.
|
||||
package main
|
||||
|
||||
import (
|
||||
"open-match.dev/open-match/examples/functions/golang/backfill/mmf"
|
||||
)
|
||||
|
||||
const (
|
||||
queryServiceAddr = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
|
||||
serverPort = 50502 // The port for hosting the Match Function.
|
||||
)
|
||||
|
||||
func main() {
|
||||
mmf.Start(queryServiceAddr, serverPort)
|
||||
}
|
297
examples/functions/golang/backfill/mmf/matchfunction.go
Normal file
297
examples/functions/golang/backfill/mmf/matchfunction.go
Normal file
@ -0,0 +1,297 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package mmf provides a sample match function that uses the GRPC harness to set up 1v1 matches.
|
||||
// This sample is a reference to demonstrate the usage of backfill and should only be used as
|
||||
// a starting point for your match function. You will need to modify the
|
||||
// matchmaking logic in this function based on your game's requirements.
|
||||
package mmf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/pkg/matchfunction"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
const (
|
||||
playersPerMatch = 2
|
||||
openSlotsKey = "open-slots"
|
||||
matchName = "backfill-matchfunction"
|
||||
)
|
||||
|
||||
// matchFunctionService implements pb.MatchFunctionServer, the server generated
|
||||
// by compiling the protobuf, by fulfilling the pb.MatchFunctionServer interface.
|
||||
type matchFunctionService struct {
|
||||
grpc *grpc.Server
|
||||
queryServiceClient pb.QueryServiceClient
|
||||
port int
|
||||
}
|
||||
|
||||
func (s *matchFunctionService) Run(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
|
||||
log.Printf("Generating proposals for function %v", req.GetProfile().GetName())
|
||||
|
||||
var proposals []*pb.Match
|
||||
profile := req.GetProfile()
|
||||
pools := profile.GetPools()
|
||||
|
||||
for _, p := range pools {
|
||||
tickets, err := matchfunction.QueryPool(stream.Context(), s.queryServiceClient, p)
|
||||
if err != nil {
|
||||
log.Printf("Failed to query tickets for the given pool, got %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
backfills, err := matchfunction.QueryBackfillPool(stream.Context(), s.queryServiceClient, p)
|
||||
if err != nil {
|
||||
log.Printf("Failed to query backfills for the given pool, got %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
matches, err := makeMatches(profile, p, tickets, backfills)
|
||||
if err != nil {
|
||||
log.Printf("Failed to generate matches, got %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
proposals = append(proposals, matches...)
|
||||
}
|
||||
|
||||
log.Printf("Streaming %v proposals to Open Match", len(proposals))
|
||||
// Stream the generated proposals back to Open Match.
|
||||
for _, proposal := range proposals {
|
||||
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
|
||||
log.Printf("Failed to stream proposals to Open Match, got %s", err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeMatches tries to handle backfills at first, then it makes full matches, at the end it makes a match with backfill
|
||||
// if tickets left
|
||||
func makeMatches(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, backfills []*pb.Backfill) ([]*pb.Match, error) {
|
||||
var matches []*pb.Match
|
||||
newMatches, remainingTickets, err := handleBackfills(profile, tickets, backfills, len(matches))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matches = append(matches, newMatches...)
|
||||
newMatches, remainingTickets = makeFullMatches(profile, remainingTickets, len(matches))
|
||||
matches = append(matches, newMatches...)
|
||||
|
||||
if len(remainingTickets) > 0 {
|
||||
match, err := makeMatchWithBackfill(profile, pool, remainingTickets, len(matches))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matches = append(matches, match)
|
||||
}
|
||||
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// handleBackfills looks at each backfill's openSlots which is a number of required tickets,
|
||||
// acquires that tickets, decreases openSlots in backfill and makes a match with updated backfill and associated tickets.
|
||||
func handleBackfills(profile *pb.MatchProfile, tickets []*pb.Ticket, backfills []*pb.Backfill, lastMatchId int) ([]*pb.Match, []*pb.Ticket, error) {
|
||||
matchId := lastMatchId
|
||||
var matches []*pb.Match
|
||||
|
||||
for _, b := range backfills {
|
||||
openSlots, err := getOpenSlots(b)
|
||||
if err != nil {
|
||||
return nil, tickets, err
|
||||
}
|
||||
|
||||
var matchTickets []*pb.Ticket
|
||||
for openSlots > 0 && len(tickets) > 0 {
|
||||
matchTickets = append(matchTickets, tickets[0])
|
||||
tickets = tickets[1:]
|
||||
openSlots--
|
||||
}
|
||||
|
||||
if len(matchTickets) > 0 {
|
||||
err := setOpenSlots(b, openSlots)
|
||||
if err != nil {
|
||||
return nil, tickets, err
|
||||
}
|
||||
|
||||
matchId++
|
||||
match := newMatch(matchId, profile.Name, matchTickets, b)
|
||||
matches = append(matches, &match)
|
||||
}
|
||||
}
|
||||
|
||||
return matches, tickets, nil
|
||||
}
|
||||
|
||||
// makeMatchWithBackfill makes not full match, creates backfill for it with openSlots = playersPerMatch-len(tickets).
|
||||
func makeMatchWithBackfill(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, lastMatchId int) (*pb.Match, error) {
|
||||
if len(tickets) == 0 {
|
||||
return nil, fmt.Errorf("tickets are required")
|
||||
}
|
||||
|
||||
if len(tickets) >= playersPerMatch {
|
||||
return nil, fmt.Errorf("too many tickets")
|
||||
}
|
||||
|
||||
matchId := lastMatchId
|
||||
searchFields := newSearchFields(pool)
|
||||
backfill, err := newBackfill(searchFields, playersPerMatch-len(tickets))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matchId++
|
||||
match := newMatch(matchId, profile.Name, tickets, backfill)
|
||||
// indicates that it is a new match and new game server should be allocated for it
|
||||
match.AllocateGameserver = true
|
||||
|
||||
return &match, nil
|
||||
}
|
||||
|
||||
// makeFullMatches makes matches without backfill
|
||||
func makeFullMatches(profile *pb.MatchProfile, tickets []*pb.Ticket, lastMatchId int) ([]*pb.Match, []*pb.Ticket) {
|
||||
ticketNum := 0
|
||||
matchId := lastMatchId
|
||||
var matches []*pb.Match
|
||||
|
||||
for ticketNum < playersPerMatch && len(tickets) >= playersPerMatch {
|
||||
ticketNum++
|
||||
|
||||
if ticketNum == playersPerMatch {
|
||||
matchId++
|
||||
|
||||
match := newMatch(matchId, profile.Name, tickets[:playersPerMatch], nil)
|
||||
matches = append(matches, &match)
|
||||
|
||||
tickets = tickets[playersPerMatch:]
|
||||
ticketNum = 0
|
||||
}
|
||||
}
|
||||
|
||||
return matches, tickets
|
||||
}
|
||||
|
||||
// newSearchFields creates search fields based on pool's search criteria. This is just example of how it can be done.
|
||||
func newSearchFields(pool *pb.Pool) *pb.SearchFields {
|
||||
searchFields := pb.SearchFields{}
|
||||
rangeFilters := pool.GetDoubleRangeFilters()
|
||||
|
||||
if rangeFilters != nil {
|
||||
doubleArgs := make(map[string]float64)
|
||||
for _, f := range rangeFilters {
|
||||
doubleArgs[f.DoubleArg] = (f.Max - f.Min) / 2
|
||||
}
|
||||
|
||||
if len(doubleArgs) > 0 {
|
||||
searchFields.DoubleArgs = doubleArgs
|
||||
}
|
||||
}
|
||||
|
||||
stringFilters := pool.GetStringEqualsFilters()
|
||||
|
||||
if stringFilters != nil {
|
||||
stringArgs := make(map[string]string)
|
||||
for _, f := range stringFilters {
|
||||
stringArgs[f.StringArg] = f.Value
|
||||
}
|
||||
|
||||
if len(stringArgs) > 0 {
|
||||
searchFields.StringArgs = stringArgs
|
||||
}
|
||||
}
|
||||
|
||||
tagFilters := pool.GetTagPresentFilters()
|
||||
|
||||
if tagFilters != nil {
|
||||
tags := make([]string, len(tagFilters))
|
||||
for _, f := range tagFilters {
|
||||
tags = append(tags, f.Tag)
|
||||
}
|
||||
|
||||
if len(tags) > 0 {
|
||||
searchFields.Tags = tags
|
||||
}
|
||||
}
|
||||
|
||||
return &searchFields
|
||||
}
|
||||
|
||||
func newBackfill(searchFields *pb.SearchFields, openSlots int) (*pb.Backfill, error) {
|
||||
b := pb.Backfill{
|
||||
SearchFields: searchFields,
|
||||
Generation: 0,
|
||||
CreateTime: ptypes.TimestampNow(),
|
||||
}
|
||||
|
||||
err := setOpenSlots(&b, int32(openSlots))
|
||||
return &b, err
|
||||
}
|
||||
|
||||
func newMatch(num int, profile string, tickets []*pb.Ticket, b *pb.Backfill) pb.Match {
|
||||
t := time.Now().Format("2006-01-02T15:04:05.00")
|
||||
|
||||
return pb.Match{
|
||||
MatchId: fmt.Sprintf("profile-%s-time-%s-num-%d", profile, t, num),
|
||||
MatchProfile: profile,
|
||||
MatchFunction: matchName,
|
||||
Tickets: tickets,
|
||||
Backfill: b,
|
||||
}
|
||||
}
|
||||
|
||||
func setOpenSlots(b *pb.Backfill, val int32) error {
|
||||
if b.Extensions == nil {
|
||||
b.Extensions = make(map[string]*any.Any)
|
||||
}
|
||||
|
||||
any, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: val})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.Extensions[openSlotsKey] = any
|
||||
return nil
|
||||
}
|
||||
|
||||
func getOpenSlots(b *pb.Backfill) (int32, error) {
|
||||
if b == nil {
|
||||
return 0, fmt.Errorf("expected backfill is not nil")
|
||||
}
|
||||
|
||||
if b.Extensions != nil {
|
||||
if any, ok := b.Extensions[openSlotsKey]; ok {
|
||||
var val wrappers.Int32Value
|
||||
err := ptypes.UnmarshalAny(any, &val)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return val.Value, nil
|
||||
}
|
||||
}
|
||||
|
||||
return playersPerMatch, nil
|
||||
}
|
142
examples/functions/golang/backfill/mmf/matchfunction_test.go
Normal file
142
examples/functions/golang/backfill/mmf/matchfunction_test.go
Normal file
@ -0,0 +1,142 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package mmf
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"github.com/stretchr/testify/require"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
// TestHandleBackfills verifies that handleBackfills consumes tickets into
// matches for the provided backfills and decreases each backfill's
// open-slot count accordingly.
func TestHandleBackfills(t *testing.T) {
	for _, tc := range []struct {
		name              string
		tickets           []*pb.Ticket
		backfills         []*pb.Backfill
		lastMatchId       int
		expectedMatchLen  int
		expectedTicketLen int
		expectedOpenSlots int32
		expectedErr       bool
	}{
		{name: "returns no matches when no backfills specified", expectedMatchLen: 0, expectedTicketLen: 0},
		{name: "returns no matches when no tickets specified", expectedMatchLen: 0, expectedTicketLen: 0},
		{name: "returns a match with open slots decreased", tickets: []*pb.Ticket{{Id: "1"}}, backfills: []*pb.Backfill{withOpenSlots(1)}, expectedMatchLen: 1, expectedTicketLen: 0, expectedOpenSlots: playersPerMatch - 2},
	} {
		// Capture the range variable for the parallel subtest closure.
		testCase := tc
		t.Run(testCase.name, func(t *testing.T) {
			t.Parallel()

			profile := pb.MatchProfile{Name: "matchProfile"}
			matches, tickets, err := handleBackfills(&profile, testCase.tickets, testCase.backfills, testCase.lastMatchId)
			require.Equal(t, testCase.expectedErr, err != nil)
			require.Equal(t, testCase.expectedTicketLen, len(tickets))

			if err != nil {
				// On error no matches may be produced.
				require.Equal(t, 0, len(matches))
			} else {
				// Every produced match must carry a backfill whose open-slot
				// count was decremented by the tickets placed into the match.
				for _, m := range matches {
					require.NotNil(t, m.Backfill)

					openSlots, err := getOpenSlots(m.Backfill)
					require.NoError(t, err)
					require.Equal(t, testCase.expectedOpenSlots, openSlots)
				}
			}
		})
	}
}
|
||||
|
||||
// TestMakeMatchWithBackfill verifies makeMatchWithBackfill rejects empty or
// oversized ticket lists and otherwise produces a match with a new (id-less)
// backfill, a game-server allocation flag, and the expected open-slot count.
func TestMakeMatchWithBackfill(t *testing.T) {
	for _, testCase := range []struct {
		name              string
		tickets           []*pb.Ticket
		lastMatchId       int
		expectedOpenSlots int32
		expectedErr       bool
	}{
		{name: "returns an error when length of tickets is greater then playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}, {Id: "5"}}, expectedErr: true},
		{name: "returns an error when length of tickets is equal to playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}}, expectedErr: true},
		{name: "returns an error when no tickets are provided", expectedErr: true},
		{name: "returns a match with backfill", tickets: []*pb.Ticket{{Id: "1"}}, expectedOpenSlots: playersPerMatch - 1},
	} {
		// Capture the range variable for the parallel subtest closure.
		testCase := testCase
		t.Run(testCase.name, func(t *testing.T) {
			t.Parallel()

			pool := pb.Pool{}
			profile := pb.MatchProfile{Name: "matchProfile"}
			match, err := makeMatchWithBackfill(&profile, &pool, testCase.tickets, testCase.lastMatchId)
			require.Equal(t, testCase.expectedErr, err != nil)

			if err == nil {
				require.NotNil(t, match)
				require.NotNil(t, match.Backfill)
				require.True(t, match.AllocateGameserver)
				// A freshly created backfill has no id until Open Match stores it.
				require.Equal(t, "", match.Backfill.Id)

				openSlots, err := getOpenSlots(match.Backfill)
				require.Nil(t, err)
				require.Equal(t, testCase.expectedOpenSlots, openSlots)
			}
		})

	}
}
|
||||
|
||||
// TestMakeFullMatches verifies makeFullMatches only emits matches of exactly
// playersPerMatch tickets, never attaches a backfill, and returns the
// leftover tickets untouched.
func TestMakeFullMatches(t *testing.T) {
	for _, testCase := range []struct {
		name              string
		tickets           []*pb.Ticket
		lastMatchId       int
		expectedMatchLen  int
		expectedTicketLen int
	}{
		{name: "returns no matches when there are no tickets", tickets: []*pb.Ticket{}, expectedMatchLen: 0, expectedTicketLen: 0},
		{name: "returns no matches when length of tickets is less then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}}, expectedMatchLen: 0, expectedTicketLen: 1},
		{name: "returns a match when length of tickets is greater then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}}, expectedMatchLen: 1, expectedTicketLen: 0},
	} {
		// Capture the range variable for the parallel subtest closure.
		testCase := testCase
		t.Run(testCase.name, func(t *testing.T) {
			t.Parallel()
			profile := pb.MatchProfile{Name: "matchProfile"}
			matches, tickets := makeFullMatches(&profile, testCase.tickets, testCase.lastMatchId)

			require.Equal(t, testCase.expectedMatchLen, len(matches))
			require.Equal(t, testCase.expectedTicketLen, len(tickets))

			// Full matches never carry a backfill and are always exactly full.
			for _, m := range matches {
				require.Nil(t, m.Backfill)
				require.Equal(t, playersPerMatch, len(m.Tickets))
			}
		})
	}
}
|
||||
|
||||
func withOpenSlots(openSlots int) *pb.Backfill {
|
||||
val, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: int32(openSlots)})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &pb.Backfill{
|
||||
Extensions: map[string]*any.Any{
|
||||
openSlotsKey: val,
|
||||
},
|
||||
}
|
||||
}
|
59
examples/functions/golang/backfill/mmf/server.go
Normal file
59
examples/functions/golang/backfill/mmf/server.go
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package mmf provides a sample match function that uses the GRPC harness to set up 1v1 matches.
|
||||
// This sample is a reference to demonstrate the usage of backfill and should only be used as
|
||||
// a starting point for your match function. You will need to modify the
|
||||
// matchmaking logic in this function based on your game's requirements.
|
||||
package mmf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
// Start connects to the QueryService at queryServiceAddr and serves the
// match function gRPC service on serverPort. It blocks in Serve until the
// server stops and exits the process (log.Fatalf) on any setup or serve
// error.
func Start(queryServiceAddr string, serverPort int) {
	// Connect to QueryService.
	// NOTE(review): grpc.WithInsecure is deprecated in newer grpc-go
	// releases; acceptable for this sample.
	conn, err := grpc.Dial(queryServiceAddr, grpc.WithInsecure())

	if err != nil {
		log.Fatalf("Failed to connect to Open Match, got %s", err.Error())
	}

	defer conn.Close()

	mmfService := matchFunctionService{
		queryServiceClient: pb.NewQueryServiceClient(conn),
	}

	// Create and host a new gRPC service on the configured port.
	server := grpc.NewServer()
	pb.RegisterMatchFunctionServer(server, &mmfService)
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", serverPort))

	if err != nil {
		log.Fatalf("TCP net listener initialization failed for port %v, got %s", serverPort, err.Error())
	}

	log.Printf("TCP net listener initialized for port %v", serverPort)
	err = server.Serve(ln)

	if err != nil {
		log.Fatalf("gRPC serve failed, got %s", err.Error())
	}
}
|
@ -40,16 +40,16 @@ var (
|
||||
|
||||
activeScenario = scenarios.ActiveScenario
|
||||
|
||||
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
|
||||
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
|
||||
mFetchMatchSuccesses = telemetry.Counter("scale_backend_fetch_match_successes", "fetch match successes")
|
||||
mFetchMatchErrors = telemetry.Counter("scale_backend_fetch_match_errors", "fetch match errors")
|
||||
mMatchesReturned = telemetry.Counter("scale_backend_matches_returned", "matches returned")
|
||||
mSumTicketsReturned = telemetry.Counter("scale_backend_sum_tickets_returned", "tickets in matches returned")
|
||||
mMatchesAssigned = telemetry.Counter("scale_backend_matches_assigned", "matches assigned")
|
||||
mMatchAssignsFailed = telemetry.Counter("scale_backend_match_assigns_failed", "match assigns failed")
|
||||
mTicketsDeleted = telemetry.Counter("scale_backend_tickets_deleted", "tickets deleted")
|
||||
mTicketDeletesFailed = telemetry.Counter("scale_backend_ticket_deletes_failed", "ticket deletes failed")
|
||||
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
|
||||
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
|
||||
mFetchMatchSuccesses = telemetry.Counter("scale_backend_fetch_match_successes", "fetch match successes")
|
||||
mFetchMatchErrors = telemetry.Counter("scale_backend_fetch_match_errors", "fetch match errors")
|
||||
mMatchesReturned = telemetry.Counter("scale_backend_matches_returned", "matches returned")
|
||||
mSumTicketsReturned = telemetry.Counter("scale_backend_sum_tickets_returned", "tickets in matches returned")
|
||||
mMatchesAssigned = telemetry.Counter("scale_backend_matches_assigned", "matches assigned")
|
||||
mMatchAssignsFailed = telemetry.Counter("scale_backend_match_assigns_failed", "match assigns failed")
|
||||
mBackfillsDeleted = telemetry.Counter("scale_backend_backfills_deleted", "backfills deleted")
|
||||
mBackfillDeletesFailed = telemetry.Counter("scale_backend_backfill_deletes_failed", "backfill deletes failed")
|
||||
)
|
||||
|
||||
// Run triggers execution of functions that continuously fetch, assign and
|
||||
@ -79,12 +79,28 @@ func run(cfg config.View) {
|
||||
w := logger.Writer()
|
||||
defer w.Close()
|
||||
|
||||
matchesForAssignment := make(chan *pb.Match, 30000)
|
||||
ticketsForDeletion := make(chan string, 30000)
|
||||
matchesToAssign := make(chan *pb.Match, 30000)
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
go runAssignments(be, matchesForAssignment, ticketsForDeletion)
|
||||
go runDeletions(fe, ticketsForDeletion)
|
||||
if activeScenario.BackendAssignsTickets {
|
||||
for i := 0; i < 100; i++ {
|
||||
go runAssignments(be, matchesToAssign)
|
||||
}
|
||||
}
|
||||
|
||||
backfillsToDelete := make(chan *pb.Backfill, 30000)
|
||||
|
||||
if activeScenario.BackendDeletesBackfills {
|
||||
for i := 0; i < 100; i++ {
|
||||
go runDeleteBackfills(fe, backfillsToDelete)
|
||||
}
|
||||
}
|
||||
|
||||
matchesToAcknowledge := make(chan *pb.Match, 30000)
|
||||
|
||||
if activeScenario.BackendAcknowledgesBackfills {
|
||||
for i := 0; i < 100; i++ {
|
||||
go runAcknowledgeBackfills(fe, matchesToAcknowledge, backfillsToDelete)
|
||||
}
|
||||
}
|
||||
|
||||
// Don't go faster than this, as it likely means that FetchMatches is throwing
|
||||
@ -98,7 +114,7 @@ func run(cfg config.View) {
|
||||
wg.Add(1)
|
||||
go func(wg *sync.WaitGroup, p *pb.MatchProfile) {
|
||||
defer wg.Done()
|
||||
runFetchMatches(be, p, matchesForAssignment)
|
||||
runFetchMatches(be, p, matchesToAssign, matchesToAcknowledge)
|
||||
}(&wg, p)
|
||||
}
|
||||
|
||||
@ -108,13 +124,13 @@ func run(cfg config.View) {
|
||||
}
|
||||
}
|
||||
|
||||
func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForAssignment chan<- *pb.Match) {
|
||||
func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesToAssign chan<- *pb.Match, matchesToAcknowledge chan<- *pb.Match) {
|
||||
ctx, span := trace.StartSpan(context.Background(), "scale.backend/FetchMatches")
|
||||
defer span.End()
|
||||
|
||||
req := &pb.FetchMatchesRequest{
|
||||
Config: &pb.FunctionConfig{
|
||||
Host: "om-function",
|
||||
Host: "open-match-function",
|
||||
Port: 50502,
|
||||
Type: pb.FunctionConfig_GRPC,
|
||||
},
|
||||
@ -146,62 +162,90 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA
|
||||
telemetry.RecordNUnitMeasurement(ctx, mSumTicketsReturned, int64(len(resp.GetMatch().Tickets)))
|
||||
telemetry.RecordUnitMeasurement(ctx, mMatchesReturned)
|
||||
|
||||
matchesForAssignment <- resp.GetMatch()
|
||||
if activeScenario.BackendAssignsTickets {
|
||||
matchesToAssign <- resp.GetMatch()
|
||||
}
|
||||
|
||||
if activeScenario.BackendAcknowledgesBackfills {
|
||||
matchesToAcknowledge <- resp.GetMatch()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runAssignments(be pb.BackendServiceClient, matchesForAssignment <-chan *pb.Match, ticketsForDeletion chan<- string) {
|
||||
func runDeleteBackfills(fe pb.FrontendServiceClient, backfillsToDelete <-chan *pb.Backfill) {
|
||||
for b := range backfillsToDelete {
|
||||
if !activeScenario.BackfillDeleteCond(b) {
|
||||
continue
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err := fe.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: b.Id})
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed to delete backfill: %s", b.Id)
|
||||
telemetry.RecordUnitMeasurement(ctx, mBackfillDeletesFailed)
|
||||
} else {
|
||||
telemetry.RecordUnitMeasurement(ctx, mBackfillsDeleted)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runAcknowledgeBackfills(fe pb.FrontendServiceClient, matchesToAcknowledge <-chan *pb.Match, backfillsToDelete chan<- *pb.Backfill) {
|
||||
for m := range matchesToAcknowledge {
|
||||
backfillId := m.Backfill.GetId()
|
||||
if backfillId == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
err := acknowledgeBackfill(fe, backfillId)
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed to acknowledge backfill: %s", backfillId)
|
||||
continue
|
||||
}
|
||||
|
||||
if activeScenario.BackendDeletesBackfills {
|
||||
backfillsToDelete <- m.Backfill
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func acknowledgeBackfill(fe pb.FrontendServiceClient, backfillId string) error {
|
||||
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/AcknowledgeBackfill")
|
||||
defer span.End()
|
||||
|
||||
_, err := fe.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{
|
||||
BackfillId: backfillId,
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func runAssignments(be pb.BackendServiceClient, matchesToAssign <-chan *pb.Match) {
|
||||
ctx := context.Background()
|
||||
|
||||
for m := range matchesForAssignment {
|
||||
for m := range matchesToAssign {
|
||||
ids := []string{}
|
||||
for _, t := range m.Tickets {
|
||||
ids = append(ids, t.GetId())
|
||||
}
|
||||
|
||||
if activeScenario.BackendAssignsTickets {
|
||||
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: ids,
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
|
||||
},
|
||||
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: ids,
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
|
||||
logger.WithError(err).Error("failed to assign tickets")
|
||||
continue
|
||||
}
|
||||
|
||||
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
|
||||
logger.WithError(err).Error("failed to assign tickets")
|
||||
continue
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
ticketsForDeletion <- id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runDeletions(fe pb.FrontendServiceClient, ticketsForDeletion <-chan string) {
|
||||
ctx := context.Background()
|
||||
|
||||
for id := range ticketsForDeletion {
|
||||
if activeScenario.BackendDeletesTickets {
|
||||
req := &pb.DeleteTicketRequest{
|
||||
TicketId: id,
|
||||
}
|
||||
|
||||
_, err := fe.DeleteTicket(context.Background(), req)
|
||||
|
||||
if err == nil {
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
|
||||
} else {
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
|
||||
logger.WithError(err).Error("failed to delete tickets")
|
||||
}
|
||||
}
|
||||
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
|
||||
}
|
||||
}
|
||||
|
@ -38,12 +38,22 @@ var (
|
||||
})
|
||||
activeScenario = scenarios.ActiveScenario
|
||||
|
||||
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
|
||||
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "tickets created")
|
||||
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
|
||||
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
|
||||
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
|
||||
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "tickets created")
|
||||
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
|
||||
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
|
||||
mTicketsDeleted = telemetry.Counter("scale_frontend_tickets_deleted", "tickets deleted")
|
||||
mTicketDeletesFailed = telemetry.Counter("scale_frontend_ticket_deletes_failed", "ticket deletes failed")
|
||||
mBackfillsCreated = telemetry.Counter("scale_frontend_backfills_created", "backfills_created")
|
||||
mBackfillCreationsFailed = telemetry.Counter("scale_frontend_backfill_creations_failed", "backfill creations failed")
|
||||
mTicketsTimeToAssignment = telemetry.HistogramWithBounds("scale_frontend_tickets_time_to_assignment", "tickets time to assignment", stats.UnitMilliseconds, []float64{0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000})
|
||||
)
|
||||
|
||||
type ticketToWatch struct {
|
||||
id string
|
||||
createdAt time.Time
|
||||
}
|
||||
|
||||
// Run triggers execution of the scale frontend component that creates
|
||||
// tickets at scale in Open Match.
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
@ -61,9 +71,12 @@ func run(cfg config.View) {
|
||||
}
|
||||
fe := pb.NewFrontendServiceClient(conn)
|
||||
|
||||
if activeScenario.FrontendCreatesBackfillsOnStart {
|
||||
createBackfills(fe, activeScenario.FrontendTotalBackfillsToCreate)
|
||||
}
|
||||
|
||||
ticketQPS := int(activeScenario.FrontendTicketCreatedQPS)
|
||||
ticketTotal := activeScenario.FrontendTotalTicketsToCreate
|
||||
|
||||
totalCreated := 0
|
||||
|
||||
for range time.Tick(time.Second) {
|
||||
@ -89,13 +102,27 @@ func runner(fe pb.FrontendServiceClient) {
|
||||
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
|
||||
|
||||
g.start(mRunnersCreating)
|
||||
createdAt := time.Now()
|
||||
id, err := createTicket(ctx, fe)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed to create a ticket")
|
||||
return
|
||||
}
|
||||
|
||||
_ = id
|
||||
err = watchAssignments(ctx, fe, ticketToWatch{id: id, createdAt: createdAt})
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed to get ticket assignment: %s", id)
|
||||
} else {
|
||||
ms := time.Since(createdAt).Nanoseconds() / 1e6
|
||||
stats.Record(ctx, mTicketsTimeToAssignment.M(ms))
|
||||
}
|
||||
|
||||
if activeScenario.FrontendDeletesTickets {
|
||||
err = deleteTicket(ctx, fe, id)
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed to delete ticket: %s", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, error) {
|
||||
@ -116,6 +143,68 @@ func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, err
|
||||
return resp.Id, nil
|
||||
}
|
||||
|
||||
func watchAssignments(ctx context.Context, fe pb.FrontendServiceClient, ticket ticketToWatch) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
stream, err := fe.WatchAssignments(ctx, &pb.WatchAssignmentsRequest{TicketId: ticket.id})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var a *pb.Assignment
|
||||
for a.GetConnection() == "" {
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a = resp.Assignment
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createBackfills(fe pb.FrontendServiceClient, numBackfillsToCreate int) error {
|
||||
for i := 0; i < numBackfillsToCreate; i++ {
|
||||
err := createBackfill(fe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createBackfill(fe pb.FrontendServiceClient) error {
|
||||
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/CreateBackfill")
|
||||
defer span.End()
|
||||
|
||||
req := pb.CreateBackfillRequest{
|
||||
Backfill: activeScenario.Backfill(),
|
||||
}
|
||||
|
||||
_, err := fe.CreateBackfill(ctx, &req)
|
||||
if err != nil {
|
||||
telemetry.RecordUnitMeasurement(ctx, mBackfillCreationsFailed)
|
||||
logger.WithError(err).Error("failed to create backfill")
|
||||
return err
|
||||
}
|
||||
|
||||
telemetry.RecordUnitMeasurement(ctx, mBackfillsCreated)
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteTicket(ctx context.Context, fe pb.FrontendServiceClient, ticketId string) error {
|
||||
_, err := fe.DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticketId})
|
||||
if err != nil {
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
|
||||
} else {
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Allows concurrent moficiation of a gauge value by modifying the concurrent
|
||||
// value with a delta.
|
||||
func concurrentGauge(s *stats.Int64Measure) func(delta int64) {
|
||||
|
271
examples/scale/scenarios/backfill/backfill.go
Normal file
271
examples/scale/scenarios/backfill/backfill.go
Normal file
@ -0,0 +1,271 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
const (
|
||||
poolName = "all"
|
||||
openSlotsKey = "open-slots"
|
||||
)
|
||||
|
||||
func Scenario() *BackfillScenario {
|
||||
ticketsPerMatch := 4
|
||||
return &BackfillScenario{
|
||||
TicketsPerMatch: ticketsPerMatch,
|
||||
MaxTicketsPerNotFullMatch: 3,
|
||||
BackfillDeleteCond: func(b *pb.Backfill) bool {
|
||||
openSlots := getOpenSlots(b, ticketsPerMatch)
|
||||
return openSlots <= 0
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// BackfillScenario drives the scale tests that exercise Open Match
// backfills.
type BackfillScenario struct {
	// TicketsPerMatch is the number of tickets that makes a match full.
	TicketsPerMatch int
	// MaxTicketsPerNotFullMatch caps the tickets placed into a match that is
	// created together with a new backfill.
	MaxTicketsPerNotFullMatch int
	// BackfillDeleteCond reports whether a backfill should be deleted.
	BackfillDeleteCond func(*pb.Backfill) bool
}
|
||||
|
||||
func (s *BackfillScenario) Profiles() []*pb.MatchProfile {
|
||||
return []*pb.MatchProfile{
|
||||
{
|
||||
Name: "entirePool",
|
||||
Pools: []*pb.Pool{
|
||||
{
|
||||
Name: poolName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BackfillScenario) Ticket() *pb.Ticket {
|
||||
return &pb.Ticket{}
|
||||
}
|
||||
|
||||
func (s *BackfillScenario) Backfill() *pb.Backfill {
|
||||
return &pb.Backfill{}
|
||||
}
|
||||
|
||||
func (s *BackfillScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
return statefullMMF(p, poolBackfills, poolTickets, s.TicketsPerMatch, s.MaxTicketsPerNotFullMatch)
|
||||
}
|
||||
|
||||
// statefullMMF is a MMF implementation which is used in scenario when we want MMF to create not full match and fill it later.
// 1. The first FetchMatches is called
// 2. MMF grabs maxTicketsPerNotFullMatch tickets and makes a match and new backfill for it
// 3. MMF sets backfill's open slots to ticketsPerMatch - maxTicketsPerNotFullMatch
// 4. MMF returns the match as a result
// 5. The second FetchMatches is called
// 6. MMF gets previously created backfill
// 7. MMF gets backfill's open slots value
// 8. MMF grabs openSlots tickets and makes a match with previously created backfill
// 9. MMF sets backfill's open slots to 0
// 10. MMF returns the match as a result
func statefullMMF(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket, ticketsPerMatch int, maxTicketsPerNotFullMatch int) ([]*pb.Match, error) {
	var matches []*pb.Match

	for pool, backfills := range poolBackfills {
		tickets, ok := poolTickets[pool]

		if !ok || len(tickets) == 0 {
			// no tickets in pool
			continue
		}

		// process backfills first: fill existing open slots before creating
		// any new not-full match.
		for _, b := range backfills {
			l := len(tickets)
			if l == 0 {
				// no tickets left
				break
			}

			openSlots := getOpenSlots(b, ticketsPerMatch)
			if openSlots <= 0 {
				// no free open slots
				continue
			}

			// Cap the tickets taken at the backfill's remaining capacity.
			if l > openSlots {
				l = openSlots
			}

			// Claim the slots on the backfill before emitting the match.
			setOpenSlots(b, openSlots-l)
			matches = append(matches, &pb.Match{
				MatchId:       fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
				Tickets:       tickets[0:l],
				MatchProfile:  p.GetName(),
				MatchFunction: "backfill",
				Backfill:      b,
			})
			tickets = tickets[l:]
		}

		// create not full matches with backfill
		for {
			l := len(tickets)
			if l == 0 {
				// no tickets left
				break
			}

			if l > maxTicketsPerNotFullMatch {
				l = maxTicketsPerNotFullMatch
			}
			b := pb.Backfill{}
			// The new match's remaining capacity becomes the backfill's
			// open-slot count.
			setOpenSlots(&b, ticketsPerMatch-l)
			matches = append(matches, &pb.Match{
				MatchId:            fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
				Tickets:            tickets[0:l],
				MatchProfile:       p.GetName(),
				MatchFunction:      "backfill",
				Backfill:           &b,
				AllocateGameserver: true,
			})
			tickets = tickets[l:]
		}
	}

	return matches, nil
}
|
||||
|
||||
func getOpenSlots(b *pb.Backfill, defaultVal int) int {
|
||||
if b.Extensions == nil {
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
any, ok := b.Extensions[openSlotsKey]
|
||||
if !ok {
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
var val wrappers.Int32Value
|
||||
err := ptypes.UnmarshalAny(any, &val)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return int(val.Value)
|
||||
}
|
||||
|
||||
func setOpenSlots(b *pb.Backfill, val int) {
|
||||
if b.Extensions == nil {
|
||||
b.Extensions = make(map[string]*any.Any)
|
||||
}
|
||||
|
||||
any, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: int32(val)})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b.Extensions[openSlotsKey] = any
|
||||
}
|
||||
|
||||
// statelessMMF is a MMF implementation which is used in scenario when we want MMF to fill backfills created by a Gameserver. It doesn't create
// or update any backfill.
// 1. FetchMatches is called
// 2. MMF gets a backfill
// 3. MMF grabs ticketsPerMatch tickets and makes a match with the backfill
// 4. MMF returns the match as a result
func statelessMMF(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket, ticketsPerMatch int) ([]*pb.Match, error) {
	var matches []*pb.Match

	for pool, backfills := range poolBackfills {
		tickets, ok := poolTickets[pool]

		if !ok || len(tickets) == 0 {
			// no tickets in pool
			continue
		}

		for _, b := range backfills {
			l := len(tickets)
			if l == 0 {
				// no tickets left
				break
			}

			// Cap the tickets taken per match; ticketsPerMatch <= 0 means
			// "take everything remaining".
			if l > ticketsPerMatch && ticketsPerMatch > 0 {
				l = ticketsPerMatch
			}

			matches = append(matches, &pb.Match{
				MatchId:       fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
				Tickets:       tickets[0:l],
				MatchProfile:  p.GetName(),
				MatchFunction: "backfill",
				Backfill:      b,
			})
			tickets = tickets[l:]
		}
	}

	return matches, nil
}
|
||||
|
||||
func (s *BackfillScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
tickets := map[string]struct{}{}
|
||||
backfills := map[string]struct{}{}
|
||||
matchIds := []string{}
|
||||
|
||||
outer:
|
||||
for {
|
||||
req, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read evaluator input stream: %w", err)
|
||||
}
|
||||
|
||||
m := req.GetMatch()
|
||||
|
||||
if _, ok := backfills[m.Backfill.Id]; ok {
|
||||
continue outer
|
||||
}
|
||||
|
||||
for _, t := range m.Tickets {
|
||||
if _, ok := tickets[t.Id]; ok {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range m.Tickets {
|
||||
tickets[t.Id] = struct{}{}
|
||||
}
|
||||
|
||||
matchIds = append(matchIds, m.GetMatchId())
|
||||
}
|
||||
|
||||
for _, id := range matchIds {
|
||||
err := stream.Send(&pb.EvaluateResponse{MatchId: id})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sending evaluator output stream: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -78,7 +78,11 @@ func (b *BattleRoyalScenario) Ticket() *pb.Ticket {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
func (b *BattleRoyalScenario) Backfill() *pb.Backfill {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
const playersInMatch = 100
|
||||
|
||||
tickets := poolTickets[poolName]
|
||||
@ -101,7 +105,7 @@ func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[
|
||||
func (b *BattleRoyalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
used := map[string]struct{}{}
|
||||
|
||||
// TODO: once the evaluator client supports sending and recieving at the
|
||||
// TODO: once the evaluator client supports sending and receiving at the
|
||||
// same time, don't buffer, just send results immediately.
|
||||
matchIDs := []string{}
|
||||
|
||||
|
@ -33,7 +33,7 @@ func Scenario() *FirstMatchScenario {
|
||||
type FirstMatchScenario struct {
|
||||
}
|
||||
|
||||
func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
|
||||
func (*FirstMatchScenario) Profiles() []*pb.MatchProfile {
|
||||
return []*pb.MatchProfile{
|
||||
{
|
||||
Name: "entirePool",
|
||||
@ -46,11 +46,15 @@ func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
|
||||
}
|
||||
}
|
||||
|
||||
func (_ *FirstMatchScenario) Ticket() *pb.Ticket {
|
||||
func (*FirstMatchScenario) Ticket() *pb.Ticket {
|
||||
return &pb.Ticket{}
|
||||
}
|
||||
|
||||
func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
func (*FirstMatchScenario) Backfill() *pb.Backfill {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
tickets := poolTickets[poolName]
|
||||
var matches []*pb.Match
|
||||
|
||||
@ -68,10 +72,10 @@ func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[s
|
||||
|
||||
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
|
||||
// previously accepted match. Essentially first to claim the ticket wins.
|
||||
func (_ *FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
func (*FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
used := map[string]struct{}{}
|
||||
|
||||
// TODO: once the evaluator client supports sending and recieving at the
|
||||
// TODO: once the evaluator client supports sending and receiving at the
|
||||
// same time, don't buffer, just send results immediately.
|
||||
matchIDs := []string{}
|
||||
|
||||
|
@ -19,9 +19,8 @@ import (
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/examples/scale/scenarios/battleroyal"
|
||||
"open-match.dev/open-match/examples/scale/scenarios/backfill"
|
||||
"open-match.dev/open-match/examples/scale/scenarios/firstmatch"
|
||||
"open-match.dev/open-match/examples/scale/scenarios/teamshooter"
|
||||
"open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/matchfunction"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
@ -40,11 +39,14 @@ type GameScenario interface {
|
||||
// Ticket creates a new ticket, with randomized parameters.
|
||||
Ticket() *pb.Ticket
|
||||
|
||||
// Backfill creates a new backfill, with randomized parameters.
|
||||
Backfill() *pb.Backfill
|
||||
|
||||
// Profiles lists all of the profiles that should run.
|
||||
Profiles() []*pb.MatchProfile
|
||||
|
||||
// MatchFunction is the custom logic implementation of the match function.
|
||||
MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
|
||||
MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
|
||||
|
||||
// Evaluate is the custom logic implementation of the evaluator.
|
||||
Evaluate(stream pb.Evaluator_EvaluateServer) error
|
||||
@ -56,18 +58,26 @@ var ActiveScenario = func() *Scenario {
|
||||
|
||||
// TODO: Select which scenario to use based on some configuration or choice,
|
||||
// so it's easier to run different scenarios without changing code.
|
||||
gs = battleroyal.Scenario()
|
||||
gs = teamshooter.Scenario()
|
||||
//gs = battleroyal.Scenario()
|
||||
//gs = teamshooter.Scenario()
|
||||
s := backfill.Scenario()
|
||||
gs = s
|
||||
|
||||
return &Scenario{
|
||||
FrontendTotalTicketsToCreate: -1,
|
||||
FrontendTicketCreatedQPS: 100,
|
||||
FrontendTotalTicketsToCreate: -1,
|
||||
FrontendTicketCreatedQPS: 100,
|
||||
FrontendCreatesBackfillsOnStart: true,
|
||||
FrontendTotalBackfillsToCreate: 1000,
|
||||
FrontendDeletesTickets: true,
|
||||
|
||||
BackendAssignsTickets: true,
|
||||
BackendDeletesTickets: true,
|
||||
BackendAssignsTickets: false,
|
||||
BackendAcknowledgesBackfills: true,
|
||||
BackendDeletesBackfills: true,
|
||||
|
||||
Ticket: gs.Ticket,
|
||||
Profiles: gs.Profiles,
|
||||
Ticket: gs.Ticket,
|
||||
Backfill: gs.Backfill,
|
||||
BackfillDeleteCond: s.BackfillDeleteCond,
|
||||
Profiles: gs.Profiles,
|
||||
|
||||
MMF: queryPoolsWrapper(gs.MatchFunction),
|
||||
Evaluator: gs.Evaluate,
|
||||
@ -87,17 +97,23 @@ type Scenario struct {
|
||||
// TicketExtensionSize int
|
||||
// PendingTicketNumber int
|
||||
// MatchExtensionSize int
|
||||
FrontendTotalTicketsToCreate int // TotalTicketsToCreate = -1 let scale-frontend create tickets forever
|
||||
FrontendTicketCreatedQPS uint32
|
||||
FrontendTicketCreatedQPS uint32
|
||||
FrontendTotalTicketsToCreate int // TotalTicketsToCreate = -1 let scale-frontend create tickets forever
|
||||
FrontendTotalBackfillsToCreate int
|
||||
FrontendCreatesBackfillsOnStart bool
|
||||
FrontendDeletesTickets bool
|
||||
|
||||
// GameBackend Configs
|
||||
// ProfileNumber int
|
||||
// FilterNumber int
|
||||
BackendAssignsTickets bool
|
||||
BackendDeletesTickets bool
|
||||
BackendAssignsTickets bool
|
||||
BackendAcknowledgesBackfills bool
|
||||
BackendDeletesBackfills bool
|
||||
|
||||
Ticket func() *pb.Ticket
|
||||
Profiles func() []*pb.MatchProfile
|
||||
Ticket func() *pb.Ticket
|
||||
Backfill func() *pb.Backfill
|
||||
BackfillDeleteCond func(*pb.Backfill) bool
|
||||
Profiles func() []*pb.MatchProfile
|
||||
|
||||
MMF matchFunction
|
||||
Evaluator evaluatorFunction
|
||||
@ -122,7 +138,7 @@ func getQueryServiceGRPCClient() pb.QueryServiceClient {
|
||||
return pb.NewQueryServiceClient(conn)
|
||||
}
|
||||
|
||||
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
|
||||
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
|
||||
var q pb.QueryServiceClient
|
||||
var startQ sync.Once
|
||||
|
||||
@ -136,7 +152,12 @@ func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Tic
|
||||
return err
|
||||
}
|
||||
|
||||
proposals, err := mmf(req.GetProfile(), poolTickets)
|
||||
poolBackfills, err := matchfunction.QueryBackfillPools(stream.Context(), q, req.GetProfile().GetPools())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
proposals, err := mmf(req.GetProfile(), poolBackfills, poolTickets)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -154,9 +154,13 @@ func (t *TeamShooterScenario) Ticket() *pb.Ticket {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TeamShooterScenario) Backfill() *pb.Backfill {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MatchFunction puts tickets into matches based on their skill, finding the
|
||||
// required number of tickets for a game within the maximum skill difference.
|
||||
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
skill := func(t *pb.Ticket) float64 {
|
||||
return t.SearchFields.DoubleArgs[skillArg]
|
||||
}
|
||||
|
78
go.mod
78
go.mod
@ -18,56 +18,48 @@ module open-match.dev/open-match
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.47.0 // indirect
|
||||
contrib.go.opencensus.io/exporter/jaeger v0.1.0
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.6.0
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.1.0
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.8
|
||||
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71
|
||||
contrib.go.opencensus.io/exporter/jaeger v0.2.1
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.7.0
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.2.0
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.13.4
|
||||
github.com/Bose/minisentinel v0.0.0-20200130220412-917c5a9223bb
|
||||
github.com/TV4/logrus-stackdriver-formatter v0.1.0
|
||||
github.com/alicebob/miniredis/v2 v2.11.0
|
||||
github.com/apache/thrift v0.13.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.25.27 // indirect
|
||||
github.com/alicebob/miniredis/v2 v2.14.1
|
||||
github.com/aws/aws-sdk-go v1.35.26 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
github.com/fsnotify/fsnotify v1.4.7
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
github.com/go-redsync/redsync/v4 v4.3.0
|
||||
github.com/gogo/protobuf v1.3.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/golang/protobuf v1.4.3
|
||||
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible
|
||||
github.com/googleapis/gnostic v0.3.1 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.12.0
|
||||
github.com/imdario/mergo v0.3.8 // indirect
|
||||
github.com/json-iterator/go v1.1.8 // indirect
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml v1.6.0 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/prometheus/client_golang v1.2.1
|
||||
github.com/pseudomuto/protoc-gen-doc v1.3.2 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.3.0
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/imdario/mergo v0.3.11 // indirect
|
||||
github.com/pelletier/go-toml v1.8.1 // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.8.0
|
||||
github.com/rs/xid v1.2.1
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/spf13/afero v1.2.1 // indirect
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
github.com/spf13/afero v1.4.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.5.0
|
||||
github.com/stretchr/testify v1.4.0
|
||||
go.opencensus.io v0.22.1
|
||||
golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 // indirect
|
||||
golang.org/x/net v0.0.0-20191105084925-a882066a44e0
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
|
||||
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
|
||||
google.golang.org/api v0.13.0 // indirect
|
||||
google.golang.org/appengine v1.6.5 // indirect
|
||||
google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6
|
||||
google.golang.org/grpc v1.25.0
|
||||
github.com/spf13/viper v1.7.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
go.opencensus.io v0.23.0
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 // indirect
|
||||
google.golang.org/api v0.35.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210224155714-063164c882e6
|
||||
google.golang.org/grpc v1.36.0
|
||||
google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.5 // indirect
|
||||
k8s.io/api v0.0.0-20191004102255-dacd7df5a50b // kubernetes-1.13.12
|
||||
k8s.io/apimachinery v0.0.0-20191004074956-01f8b7d1121a // kubernetes-1.13.12
|
||||
k8s.io/client-go v0.0.0-20191004102537-eb5b9a8cfde7 // kubernetes-1.13.12
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.0.0-20191004102349-159aefb8556b // kubernetes-1.14.10
|
||||
k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689 // kubernetes-1.14.10
|
||||
k8s.io/client-go v11.0.1-0.20191029005444-8e4128053008+incompatible // kubernetes-1.14.10
|
||||
k8s.io/klog v1.0.0 // indirect
|
||||
sigs.k8s.io/yaml v1.1.0 // indirect
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
)
|
||||
|
@ -13,13 +13,13 @@
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v2
|
||||
appVersion: "1.1.0"
|
||||
version: 1.1.0
|
||||
appVersion: "1.4.0"
|
||||
version: 1.4.0
|
||||
name: open-match
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: 9.5.0
|
||||
repository: https://charts.helm.sh/stable
|
||||
version: 16.3.1
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: open-match-core.redis.enabled
|
||||
- name: open-match-telemetry
|
||||
version: 0.0.0-dev
|
||||
|
@ -82,6 +82,7 @@ spec:
|
||||
component: evaluator
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
|
||||
volumes:
|
||||
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.evaluatorConfigs)) | nindent 8}}
|
||||
{{- include "openmatch.volumes.tls" . | nindent 8}}
|
||||
|
@ -83,6 +83,7 @@ spec:
|
||||
component: matchfunction
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
|
||||
volumes:
|
||||
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.mmfConfigs)) | nindent 8}}
|
||||
{{- include "openmatch.volumes.tls" . | nindent 8}}
|
||||
|
@ -18,13 +18,15 @@
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 16,
|
||||
"id": 28,
|
||||
"panels": [],
|
||||
"title": "Iterations",
|
||||
"type": "row"
|
||||
},
|
||||
@ -130,11 +132,317 @@
|
||||
"x": 0,
|
||||
"y": 9
|
||||
},
|
||||
"id": 16,
|
||||
"panels": [],
|
||||
"title": "Backfills",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 10
|
||||
},
|
||||
"id": 30,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(scale_backend_backfills_deleted[5m]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Backfilld Deleted per second",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum(rate(scale_backend_backfill_deletes_failed[5m]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Backfill Deletions Failed per second",
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Backfill Deletion",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": "0",
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 18
|
||||
},
|
||||
"id": 14,
|
||||
"panels": [],
|
||||
"title": "Tickets",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"fill": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 19
|
||||
},
|
||||
"id": 26,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": false,
|
||||
"current": true,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "histogram_quantile(0.99, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "99%-ile",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.95, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "95%-ile",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.90, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "90%-ile",
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.50, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "50%-ile",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "histogram_quantile(0.10, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "10%-ile",
|
||||
"refId": "E"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Ticket Time to Assignment",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "ms",
|
||||
"label": null,
|
||||
"logBase": 2,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 19
|
||||
},
|
||||
"id": 12,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(scale_backend_sum_tickets_returned[5m]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Backend Tickets in Matches pers second",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Tickets In Matches",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": "0",
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
@ -146,7 +454,7 @@
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 10
|
||||
"y": 27
|
||||
},
|
||||
"id": 2,
|
||||
"legend": {
|
||||
@ -242,12 +550,12 @@
|
||||
"dashes": false,
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 10
|
||||
"y": 28
|
||||
},
|
||||
"id": 12,
|
||||
"id": 22,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -272,18 +580,26 @@
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(scale_backend_sum_tickets_returned[5m]))",
|
||||
"expr": "sum(rate(scale_frontend_tickets_deleted[5m]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Backend Tickets Deleted per second",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum(rate(scale_frontend_ticket_deletes_failed[5m]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Backend Tickets in Matches pers second",
|
||||
"refId": "A"
|
||||
"legendFormat": "Backend Ticket Deletions Failed per second",
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Tickets In Matches",
|
||||
"title": "Ticket Deletion",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@ -331,7 +647,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 19
|
||||
"y": 36
|
||||
},
|
||||
"id": 24,
|
||||
"legend": {
|
||||
@ -414,106 +730,13 @@
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 19
|
||||
},
|
||||
"id": 22,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(scale_backend_tickets_deleted[5m]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Backend Tickets Deleted per second",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum(rate(scale_backend_ticket_deletes_failed[5m]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Backend Ticket Deletions Failed per second",
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Ticket Deletion",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": "0",
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 27
|
||||
"y": 44
|
||||
},
|
||||
"id": 18,
|
||||
"panels": [],
|
||||
@ -530,7 +753,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 28
|
||||
"y": 45
|
||||
},
|
||||
"id": 6,
|
||||
"legend": {
|
||||
@ -616,7 +839,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 28
|
||||
"y": 45
|
||||
},
|
||||
"id": 19,
|
||||
"legend": {
|
||||
@ -705,7 +928,7 @@
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 36
|
||||
"y": 53
|
||||
},
|
||||
"id": 21,
|
||||
"panels": [],
|
||||
@ -722,7 +945,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 37
|
||||
"y": 54
|
||||
},
|
||||
"id": 8,
|
||||
"legend": {
|
||||
@ -807,7 +1030,7 @@
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 37
|
||||
"y": 54
|
||||
},
|
||||
"id": 10,
|
||||
"legend": {
|
||||
@ -890,7 +1113,7 @@
|
||||
}
|
||||
}
|
||||
],
|
||||
"refresh": "",
|
||||
"refresh": "10s",
|
||||
"schemaVersion": 18,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
|
@ -31,7 +31,7 @@ spec:
|
||||
protocol: TCP
|
||||
port: {{ .Values.scaleFrontend.httpPort }}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
|
||||
|
@ -102,14 +102,14 @@ resources:
|
||||
{{- end -}}
|
||||
|
||||
{{- define "openmatch.volumemounts.withredis" -}}
|
||||
{{- if .Values.redis.usePassword }}
|
||||
{{- if .Values.redis.auth.enabled }}
|
||||
- name: redis-password
|
||||
mountPath: {{ .Values.redis.secretMountPath }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "openmatch.volumes.withredis" -}}
|
||||
{{- if .Values.redis.usePassword }}
|
||||
{{- if .Values.redis.auth.enabled }}
|
||||
- name: redis-password
|
||||
secret:
|
||||
secretName: {{ include "call-nested" (list . "redis" "redis.fullname") }}
|
||||
|
@ -90,11 +90,11 @@ data:
|
||||
{{- if index .Values "redis" "sentinel" "enabled"}}
|
||||
sentinelPort: {{ .Values.redis.sentinel.port }}
|
||||
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
|
||||
sentinelHostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}
|
||||
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
|
||||
sentinelHostname: {{ include "call-nested" (list . "redis" "common.names.fullname") }}
|
||||
sentinelUsePassword: {{ .Values.redis.auth.sentinel }}
|
||||
{{- else}}
|
||||
# Open Match's default Redis setups
|
||||
hostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
|
||||
hostname: {{ include "call-nested" (list . "redis" "common.names.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
|
||||
port: {{ .Values.redis.redisPort }}
|
||||
user: {{ .Values.redis.user }}
|
||||
{{- end}}
|
||||
@ -104,7 +104,7 @@ data:
|
||||
port: {{ index .Values "open-match-core" "redis" "port" }}
|
||||
user: {{ index .Values "open-match-core" "redis" "user" }}
|
||||
{{- end }}
|
||||
usePassword: {{ .Values.redis.usePassword }}
|
||||
usePassword: {{ .Values.redis.auth.enabled }}
|
||||
passwordPath: {{ .Values.redis.secretMountPath }}/redis-password
|
||||
pool:
|
||||
maxIdle: {{ index .Values "open-match-core" "redis" "pool" "maxIdle" }}
|
||||
@ -119,8 +119,13 @@ data:
|
||||
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
|
||||
jaeger:
|
||||
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
|
||||
{{- if .Values.global.telemetry.jaeger.enabled }}
|
||||
agentEndpoint: "{{ tpl .Values.global.telemetry.jaeger.agentEndpoint . }}"
|
||||
collectorEndpoint: "{{ tpl .Values.global.telemetry.jaeger.collectorEndpoint . }}"
|
||||
{{- else }}
|
||||
agentEndpoint: ""
|
||||
collectorEndpoint: ""
|
||||
{{- end }}
|
||||
prometheus:
|
||||
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
|
||||
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"
|
||||
|
@ -40,6 +40,7 @@ data:
|
||||
assignedDeleteTimeout: {{ index .Values "open-match-core" "assignedDeleteTimeout" }}
|
||||
# Maximum number of tickets to return on a single QueryTicketsResponse.
|
||||
queryPageSize: {{ index .Values "open-match-core" "queryPageSize" }}
|
||||
backfillLockTimeout: {{ index .Values "open-match-core" "backfillLockTimeout" }}
|
||||
api:
|
||||
evaluator:
|
||||
hostname: "{{ include "openmatch.evaluator.hostName" . }}"
|
||||
|
@ -74,6 +74,7 @@ roleRef:
|
||||
name: {{ include "openmatch.fullname" . }}-service-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
{{- if index .Values "open-match-core" "redis" "enabled" }}
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
@ -111,3 +112,4 @@ roleRef:
|
||||
name: {{ include "openmatch.fullname" . }}-redis-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -107,8 +107,10 @@ configs:
|
||||
# https://github.com/helm/charts/tree/master/stable/redis
|
||||
redis:
|
||||
redisPort: 6379
|
||||
usePassword: false
|
||||
usePasswordFile: false
|
||||
auth:
|
||||
enabled: false
|
||||
sentinel: false
|
||||
usePasswordFiles: false
|
||||
secretMountPath: /opt/bitnami/redis/secrets
|
||||
configmap: |
|
||||
maxclients 100000
|
||||
@ -130,11 +132,11 @@ redis:
|
||||
enabled: false
|
||||
metrics:
|
||||
enabled: true
|
||||
cluster:
|
||||
slaveCount: 3
|
||||
serviceAccount:
|
||||
create: true
|
||||
slave:
|
||||
replica:
|
||||
disableCommands: [] # don't disable 'FLUSH-' commands
|
||||
replicaCount: 3
|
||||
persistence:
|
||||
enabled: false
|
||||
resources:
|
||||
@ -188,6 +190,8 @@ open-match-core:
|
||||
assignedDeleteTimeout: 10m
|
||||
# Maximum number of tickets to return on a single QueryTicketsResponse.
|
||||
queryPageSize: 10000
|
||||
# Duration for redis locks to expire.
|
||||
backfillLockTimeout: 1m
|
||||
|
||||
redis:
|
||||
enabled: true
|
||||
|
@ -107,8 +107,10 @@ configs:
|
||||
# https://github.com/helm/charts/tree/master/stable/redis
|
||||
redis:
|
||||
redisPort: 6379
|
||||
usePassword: false
|
||||
usePasswordFile: false
|
||||
auth:
|
||||
enabled: false
|
||||
sentinel: false
|
||||
usePasswordFiles: false
|
||||
secretMountPath: /opt/bitnami/redis/secrets
|
||||
configmap: |
|
||||
maxclients 100000
|
||||
@ -117,16 +119,29 @@ redis:
|
||||
enabled: true
|
||||
masterSet: om-redis-master
|
||||
port: 26379
|
||||
resources:
|
||||
requests:
|
||||
memory: 300Mi
|
||||
cpu: 0.5
|
||||
master:
|
||||
disableCommands: [] # don't disable 'FLUSH-' commands
|
||||
resources:
|
||||
requests:
|
||||
memory: 300Mi
|
||||
cpu: 0.5
|
||||
replica:
|
||||
disableCommands: [] # don't disable 'FLUSH-' commands
|
||||
replicaCount: 3
|
||||
resources:
|
||||
requests:
|
||||
memory: 300Mi
|
||||
cpu: 0.5
|
||||
metrics:
|
||||
enabled: true
|
||||
cluster:
|
||||
slaveCount: 2
|
||||
resources:
|
||||
requests:
|
||||
memory: 300Mi
|
||||
cpu: 0.5
|
||||
serviceAccount:
|
||||
create: true
|
||||
sysctlImage:
|
||||
@ -173,6 +188,8 @@ open-match-core:
|
||||
assignedDeleteTimeout: 10m
|
||||
# Maximum number of tickets to return on a single QueryTicketsResponse.
|
||||
queryPageSize: 10000
|
||||
# Duration for redis locks to expire.
|
||||
backfillLockTimeout: 1m
|
||||
|
||||
redis:
|
||||
enabled: true
|
||||
@ -254,7 +271,7 @@ global:
|
||||
# Use this field if you need to override the image registry and image tag for all services defined in this chart
|
||||
image:
|
||||
registry: gcr.io/open-match-public-images
|
||||
tag: 1.1.0
|
||||
tag: 1.4.0
|
||||
pullPolicy: Always
|
||||
|
||||
# Expose the telemetry configurations to all subcharts because prometheus, for example,
|
||||
|
@ -12,7 +12,7 @@ If you're making changes to these files you must check in the .tfstate file as
|
||||
well as comment the reason why you're enabling a feature or making a change.
|
||||
|
||||
## GCP Service Account Setup
|
||||
To use the terraform templates when developing Open Match, you need to have the [credential of your service account](https://www.terraform.io/docs/providers/google/provider_reference.html#credentials-1) associated with your Open Match project.
|
||||
To use the terraform templates when developing Open Match, you need to have the [credential of your service account](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_service_account) associated with your Open Match project.
|
||||
|
||||
```bash
|
||||
# Example: Generates the key file in GCP.
|
||||
|
@ -16,7 +16,7 @@ Lastly, these templates are meant for advanced users that are most likely
|
||||
already using Terraform.
|
||||
|
||||
## GCP Service Account Setup
|
||||
To use the terraform templates when developing Open Match, you need to have the [credential of your service account](https://www.terraform.io/docs/providers/google/provider_reference.html#credentials-1) associated with your Open Match project.
|
||||
To use the terraform templates when developing Open Match, you need to have the [credential of your service account](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_service_account) associated with your Open Match project.
|
||||
|
||||
```bash
|
||||
# Example: Generates the key file in GCP.
|
||||
|
26
internal/api/messages.proto
Normal file
26
internal/api/messages.proto
Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
package openmatch.internal;
|
||||
option go_package = "open-match.dev/open-match/internal/ipb";
|
||||
|
||||
import "api/messages.proto";
|
||||
|
||||
message BackfillInternal {
|
||||
// Represents a backfill entity which is used to fill partially full matches
|
||||
openmatch.Backfill backfill = 1;
|
||||
// List of ticket IDs associated with a current backfill
|
||||
repeated string ticket_ids = 2;
|
||||
}
|
@ -30,6 +30,7 @@ import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rs/xid"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc"
|
||||
@ -55,6 +56,7 @@ var (
|
||||
"app": "openmatch",
|
||||
"component": "app.backend",
|
||||
})
|
||||
errBackfillGenerationMismatch = errors.New("backfill generation mismatch")
|
||||
)
|
||||
|
||||
// FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile
|
||||
@ -89,7 +91,7 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
|
||||
return synchronizeSend(ctx, syncStream, m, proposals)
|
||||
})
|
||||
eg.Go(func() error {
|
||||
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs)
|
||||
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs, s.store)
|
||||
})
|
||||
|
||||
var mmfErr error
|
||||
@ -142,7 +144,7 @@ sendProposals:
|
||||
return nil
|
||||
}
|
||||
|
||||
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc) error {
|
||||
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc, store statestore.Service) error {
|
||||
var startMmfsOnce sync.Once
|
||||
|
||||
for {
|
||||
@ -169,6 +171,31 @@ func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync
|
||||
if !ok {
|
||||
return fmt.Errorf("error casting sync map value into *pb.Match: %w", err)
|
||||
}
|
||||
|
||||
backfill := match.GetBackfill()
|
||||
if backfill != nil {
|
||||
ticketIds := make([]string, 0, len(match.Tickets))
|
||||
|
||||
for _, t := range match.Tickets {
|
||||
ticketIds = append(ticketIds, t.Id)
|
||||
}
|
||||
|
||||
err = createOrUpdateBackfill(ctx, backfill, ticketIds, store)
|
||||
if err != nil {
|
||||
e, ok := status.FromError(err)
|
||||
if err == errBackfillGenerationMismatch || (ok && e.Code() == codes.NotFound) {
|
||||
err = doReleaseTickets(ctx, ticketIds, store)
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed to remove match tickets from pending release: %v", ticketIds)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
return errors.Wrapf(err, "failed to handle match backfill: %s", match.MatchId)
|
||||
}
|
||||
}
|
||||
|
||||
stats.Record(ctx, totalBytesPerMatch.M(int64(proto.Size(match))))
|
||||
stats.Record(ctx, ticketsPerMatch.M(int64(len(match.GetTickets()))))
|
||||
err = stream.Send(&pb.FetchMatchesResponse{Match: match})
|
||||
@ -296,16 +323,25 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
|
||||
}
|
||||
|
||||
func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTicketsRequest) (*pb.ReleaseTicketsResponse, error) {
|
||||
err := s.store.DeleteTicketsFromPendingRelease(ctx, req.GetTicketIds())
|
||||
err := doReleaseTickets(ctx, req.GetTicketIds(), s.store)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to remove the awaiting tickets from the pending release for requested tickets")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Record(ctx, ticketsReleased.M(int64(len(req.TicketIds))))
|
||||
return &pb.ReleaseTicketsResponse{}, nil
|
||||
}
|
||||
|
||||
func doReleaseTickets(ctx context.Context, ticketIds []string, store statestore.Service) error {
|
||||
err := store.DeleteTicketsFromPendingRelease(ctx, ticketIds)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to remove the awaiting tickets from the pending release for requested tickets")
|
||||
return err
|
||||
}
|
||||
|
||||
stats.Record(ctx, ticketsReleased.M(int64(len(ticketIds))))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseAllTicketsRequest) (*pb.ReleaseAllTicketsResponse, error) {
|
||||
err := s.store.ReleaseAllTickets(ctx)
|
||||
if err != nil {
|
||||
@ -330,6 +366,56 @@ func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicket
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func createOrUpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIds []string, store statestore.Service) error {
|
||||
if backfill.Id == "" {
|
||||
backfill.Id = xid.New().String()
|
||||
backfill.CreateTime = ptypes.TimestampNow()
|
||||
backfill.Generation = 1
|
||||
err := store.CreateBackfill(ctx, backfill, ticketIds)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.IndexBackfill(ctx, backfill)
|
||||
}
|
||||
|
||||
m := store.NewMutex(backfill.Id)
|
||||
err := m.Lock(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_, unlockErr := m.Unlock(ctx)
|
||||
if unlockErr != nil {
|
||||
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).WithError(unlockErr).Error("failed to make unlock")
|
||||
}
|
||||
}()
|
||||
|
||||
b, ids, err := store.GetBackfill(ctx, backfill.Id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.Generation != backfill.Generation {
|
||||
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).
|
||||
WithError(errBackfillGenerationMismatch).
|
||||
Errorf("failed to update backfill, expecting: %d generation but got: %d", b.Generation, backfill.Generation)
|
||||
return errBackfillGenerationMismatch
|
||||
}
|
||||
|
||||
b.SearchFields = backfill.SearchFields
|
||||
b.Extensions = backfill.Extensions
|
||||
b.Generation++
|
||||
|
||||
err = store.UpdateBackfill(ctx, b, append(ids, ticketIds...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.IndexBackfill(ctx, b)
|
||||
}
|
||||
|
||||
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
|
||||
resp, tickets, err := store.UpdateAssignments(ctx, req)
|
||||
if err != nil {
|
||||
|
@ -63,7 +63,7 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
// then returns matches which don't collide with previously returned matches.
|
||||
func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
matches := make([]*matchInp, 0)
|
||||
nilEvlautionInputs := 0
|
||||
nilEvaluationInputs := 0
|
||||
|
||||
for m := range in {
|
||||
// Evaluation criteria is optional, but sort it lower than any matches which
|
||||
@ -82,7 +82,7 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
nilEvlautionInputs++
|
||||
nilEvaluationInputs++
|
||||
}
|
||||
matches = append(matches, &matchInp{
|
||||
match: m,
|
||||
@ -90,16 +90,17 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
|
||||
})
|
||||
}
|
||||
|
||||
if nilEvlautionInputs > 0 {
|
||||
if nilEvaluationInputs > 0 {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"count": nilEvlautionInputs,
|
||||
"count": nilEvaluationInputs,
|
||||
}).Info("Some matches don't have the optional field evaluation_input set.")
|
||||
}
|
||||
|
||||
sort.Sort(byScore(matches))
|
||||
|
||||
d := decollider{
|
||||
ticketsUsed: make(map[string]*collidingMatch),
|
||||
ticketsUsed: make(map[string]*collidingMatch),
|
||||
backfillsUsed: make(map[string]*collidingMatch),
|
||||
}
|
||||
|
||||
for _, m := range matches {
|
||||
@ -121,11 +122,25 @@ type collidingMatch struct {
|
||||
}
|
||||
|
||||
type decollider struct {
|
||||
resultIDs []string
|
||||
ticketsUsed map[string]*collidingMatch
|
||||
resultIDs []string
|
||||
ticketsUsed map[string]*collidingMatch
|
||||
backfillsUsed map[string]*collidingMatch
|
||||
}
|
||||
|
||||
func (d *decollider) maybeAdd(m *matchInp) {
|
||||
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
|
||||
if cm, ok := d.backfillsUsed[m.match.Backfill.Id]; ok {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"match_id": m.match.GetMatchId(),
|
||||
"backfill_id": m.match.Backfill.Id,
|
||||
"match_score": m.inp.GetScore(),
|
||||
"colliding_match_id": cm.id,
|
||||
"colliding_match_score": cm.score,
|
||||
}).Info("Higher quality match with colliding backfill found. Rejecting match.")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range m.match.GetTickets() {
|
||||
if cm, ok := d.ticketsUsed[t.Id]; ok {
|
||||
logger.WithFields(logrus.Fields{
|
||||
@ -139,6 +154,13 @@ func (d *decollider) maybeAdd(m *matchInp) {
|
||||
}
|
||||
}
|
||||
|
||||
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
|
||||
d.backfillsUsed[m.match.Backfill.Id] = &collidingMatch{
|
||||
id: m.match.GetMatchId(),
|
||||
score: m.inp.GetScore(),
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range m.match.GetTickets() {
|
||||
d.ticketsUsed[t.Id] = &collidingMatch{
|
||||
id: m.match.GetMatchId(),
|
||||
|
@ -37,6 +37,9 @@ func TestEvaluate(t *testing.T) {
|
||||
ticket1 := &pb.Ticket{Id: "1"}
|
||||
ticket2 := &pb.Ticket{Id: "2"}
|
||||
ticket3 := &pb.Ticket{Id: "3"}
|
||||
backfill0 := &pb.Backfill{}
|
||||
backfill1 := &pb.Backfill{Id: "1"}
|
||||
backfill2 := &pb.Backfill{Id: "2"}
|
||||
|
||||
ticket12Score1 := &pb.Match{
|
||||
MatchId: "ticket12Score1",
|
||||
@ -78,6 +81,61 @@ func TestEvaluate(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
ticket1Backfill0Score1 := &pb.Match{
|
||||
MatchId: "ticket1Backfill0Score1",
|
||||
Tickets: []*pb.Ticket{ticket1},
|
||||
Backfill: backfill0,
|
||||
Extensions: map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 1,
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
ticket2Backfill0Score1 := &pb.Match{
|
||||
MatchId: "ticket2Backfill0Score1",
|
||||
Tickets: []*pb.Ticket{ticket2},
|
||||
Backfill: backfill0,
|
||||
Extensions: map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 1,
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
ticket12Backfill1Score1 := &pb.Match{
|
||||
MatchId: "ticket12Bacfill1Score1",
|
||||
Tickets: []*pb.Ticket{ticket1, ticket2},
|
||||
Backfill: backfill1,
|
||||
Extensions: map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 1,
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
ticket12Backfill1Score10 := &pb.Match{
|
||||
MatchId: "ticket12Bacfill1Score1",
|
||||
Tickets: []*pb.Ticket{ticket1, ticket2},
|
||||
Backfill: backfill1,
|
||||
Extensions: map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 10,
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
ticket12Backfill2Score5 := &pb.Match{
|
||||
MatchId: "ticket12Backfill2Score5",
|
||||
Tickets: []*pb.Ticket{ticket1, ticket2},
|
||||
Backfill: backfill2,
|
||||
Extensions: map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 5,
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
testMatches []*pb.Match
|
||||
@ -108,6 +166,16 @@ func TestEvaluate(t *testing.T) {
|
||||
testMatches: []*pb.Match{ticket12Score1, ticket12Score10, ticket123Score5, ticket3Score50},
|
||||
wantMatchIDs: []string{ticket12Score10.GetMatchId(), ticket3Score50.GetMatchId()},
|
||||
},
|
||||
{
|
||||
description: "test evaluator ignores backfills with empty id",
|
||||
testMatches: []*pb.Match{ticket1Backfill0Score1, ticket2Backfill0Score1},
|
||||
wantMatchIDs: []string{ticket1Backfill0Score1.GetMatchId(), ticket2Backfill0Score1.GetMatchId()},
|
||||
},
|
||||
{
|
||||
description: "test deduplicates matches by backfill and tickets and returns match with higher score",
|
||||
testMatches: []*pb.Match{ticket12Backfill1Score1, ticket12Backfill1Score10, ticket12Backfill2Score5},
|
||||
wantMatchIDs: []string{ticket12Backfill1Score10.GetMatchId()},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -25,8 +25,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
|
||||
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
|
||||
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
|
||||
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
|
||||
totalBytesPerBackfill = stats.Int64("open-match.dev/frontend/total_bytes_per_backfill", "Total bytes per backfill", stats.UnitBytes)
|
||||
searchFieldsPerBackfill = stats.Int64("open-match.dev/frontend/searchfields_per_backfill", "Searchfields per backfill", stats.UnitDimensionless)
|
||||
|
||||
totalBytesPerTicketView = &view.View{
|
||||
Measure: totalBytesPerTicket,
|
||||
@ -40,6 +42,18 @@ var (
|
||||
Description: "SearchFields per ticket",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
totalBytesPerBackfillView = &view.View{
|
||||
Measure: totalBytesPerBackfill,
|
||||
Name: "open-match.dev/frontend/total_bytes_per_backfill",
|
||||
Description: "Total bytes per backfill",
|
||||
Aggregation: telemetry.DefaultBytesDistribution,
|
||||
}
|
||||
searchFieldsPerBackfillView = &view.View{
|
||||
Measure: searchFieldsPerBackfill,
|
||||
Name: "open-match.dev/frontend/searchfields_per_backfill",
|
||||
Description: "SearchFields per backfill",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
)
|
||||
|
||||
// BindService creates the frontend service and binds it to the serving harness.
|
||||
@ -56,6 +70,8 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
b.RegisterViews(
|
||||
totalBytesPerTicketView,
|
||||
searchFieldsPerTicketView,
|
||||
totalBytesPerBackfillView,
|
||||
searchFieldsPerBackfillView,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
@ -94,6 +94,136 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
|
||||
return ticket, nil
|
||||
}
|
||||
|
||||
// CreateBackfill creates a new Backfill object.
|
||||
// it assigns an unique Id to the input Backfill and record it in state storage.
|
||||
// Set initial LastAcknowledge time for this Backfill.
|
||||
// A Backfill is considered as ready for matchmaking once it is created.
|
||||
// - If SearchFields exist in a Backfill, CreateBackfill will also index these fields such that one can query the ticket with query.QueryBackfills function.
|
||||
func (s *frontendService) CreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest) (*pb.Backfill, error) {
|
||||
// Perform input validation.
|
||||
if req == nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
|
||||
}
|
||||
if req.Backfill == nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
|
||||
}
|
||||
if req.Backfill.CreateTime != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "backfills cannot be created with create time set")
|
||||
}
|
||||
|
||||
return doCreateBackfill(ctx, req, s.store)
|
||||
}
|
||||
|
||||
func doCreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest, store statestore.Service) (*pb.Backfill, error) {
|
||||
// Generate an id and create a Backfill in state storage
|
||||
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
|
||||
if !ok {
|
||||
return nil, status.Error(codes.Internal, "failed to clone input ticket proto")
|
||||
}
|
||||
|
||||
backfill.Id = xid.New().String()
|
||||
backfill.CreateTime = ptypes.TimestampNow()
|
||||
backfill.Generation = 1
|
||||
|
||||
sfCount := 0
|
||||
sfCount += len(backfill.GetSearchFields().GetDoubleArgs())
|
||||
sfCount += len(backfill.GetSearchFields().GetStringArgs())
|
||||
sfCount += len(backfill.GetSearchFields().GetTags())
|
||||
stats.Record(ctx, searchFieldsPerBackfill.M(int64(sfCount)))
|
||||
stats.Record(ctx, totalBytesPerBackfill.M(int64(proto.Size(backfill))))
|
||||
|
||||
err := store.CreateBackfill(ctx, backfill, []string{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = store.IndexBackfill(ctx, backfill)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return backfill, nil
|
||||
}
|
||||
|
||||
// UpdateBackfill updates a Backfill object, if present.
|
||||
// Update would increment generation in Redis.
|
||||
// Only Extensions and SearchFields would be updated.
|
||||
// CreateTime is not changed on Update
|
||||
func (s *frontendService) UpdateBackfill(ctx context.Context, req *pb.UpdateBackfillRequest) (*pb.Backfill, error) {
|
||||
if req == nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
|
||||
}
|
||||
if req.Backfill == nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
|
||||
}
|
||||
|
||||
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
|
||||
if !ok {
|
||||
return nil, status.Error(codes.Internal, "failed to clone input backfill proto")
|
||||
}
|
||||
|
||||
bfID := backfill.Id
|
||||
if bfID == "" {
|
||||
return nil, status.Error(codes.InvalidArgument, "backfill ID should exist")
|
||||
}
|
||||
m := s.store.NewMutex(bfID)
|
||||
|
||||
err := m.Lock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if _, err = m.Unlock(ctx); err != nil {
|
||||
logger.WithError(err).Error("error on mutex unlock")
|
||||
}
|
||||
}()
|
||||
bfStored, associatedTickets, err := s.store.GetBackfill(ctx, bfID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update generation here, because Frontend is used by GameServer only
|
||||
bfStored.SearchFields = backfill.SearchFields
|
||||
bfStored.Extensions = backfill.Extensions
|
||||
// Autoincrement generation, input backfill generation validation is performed
|
||||
// on Backend only (after MMF round)
|
||||
bfStored.Generation++
|
||||
err = s.store.UpdateBackfill(ctx, bfStored, []string{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = s.store.DeleteTicketsFromPendingRelease(ctx, associatedTickets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = s.store.IndexBackfill(ctx, bfStored)
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
"id": bfStored.Id,
|
||||
}).Error("failed to index the backfill")
|
||||
return nil, err
|
||||
}
|
||||
return bfStored, nil
|
||||
}
|
||||
|
||||
// DeleteBackfill deletes a Backfill by its ID.
|
||||
func (s *frontendService) DeleteBackfill(ctx context.Context, req *pb.DeleteBackfillRequest) (*empty.Empty, error) {
|
||||
bfID := req.GetBackfillId()
|
||||
if bfID == "" {
|
||||
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
|
||||
}
|
||||
|
||||
err := s.store.DeleteBackfillCompletely(ctx, bfID)
|
||||
// Deleting of Backfill is inevitable when it is expired, so we don't worry about error here
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
}).Error("error on DeleteBackfill")
|
||||
}
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
|
||||
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
|
||||
// The client must delete the Ticket when finished matchmaking with it.
|
||||
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
|
||||
@ -164,6 +294,10 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
|
||||
var currAssignment *pb.Assignment
|
||||
var ok bool
|
||||
callback := func(assignment *pb.Assignment) error {
|
||||
if ctx.Err() != nil {
|
||||
return status.Errorf(codes.Aborted, ctx.Err().Error())
|
||||
}
|
||||
|
||||
if (currAssignment == nil && assignment != nil) || !proto.Equal(currAssignment, assignment) {
|
||||
currAssignment, ok = proto.Clone(assignment).(*pb.Assignment)
|
||||
if !ok {
|
||||
@ -180,3 +314,78 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
|
||||
|
||||
return store.GetAssignments(ctx, id, callback)
|
||||
}
|
||||
|
||||
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info.
|
||||
// This triggers an assignment process.
|
||||
func (s *frontendService) AcknowledgeBackfill(ctx context.Context, req *pb.AcknowledgeBackfillRequest) (*pb.AcknowledgeBackfillResponse, error) {
|
||||
if req.GetBackfillId() == "" {
|
||||
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
|
||||
}
|
||||
if req.GetAssignment() == nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, ".Assignment is required")
|
||||
}
|
||||
|
||||
m := s.store.NewMutex(req.GetBackfillId())
|
||||
|
||||
err := m.Lock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if _, err = m.Unlock(ctx); err != nil {
|
||||
logger.WithError(err).Error("error on mutex unlock")
|
||||
}
|
||||
}()
|
||||
|
||||
bf, associatedTickets, err := s.store.GetBackfill(ctx, req.GetBackfillId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = s.store.UpdateAcknowledgmentTimestamp(ctx, req.GetBackfillId())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := &pb.AcknowledgeBackfillResponse{
|
||||
Backfill: bf,
|
||||
Tickets: make([]*pb.Ticket, 0),
|
||||
}
|
||||
|
||||
if len(associatedTickets) != 0 {
|
||||
setResp, tickets, err := s.store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{{TicketIds: associatedTickets, Assignment: req.GetAssignment()}},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp.Tickets = tickets
|
||||
|
||||
// log errors returned from UpdateAssignments to track tickets with NotFound errors
|
||||
for _, f := range setResp.Failures {
|
||||
logger.Errorf("failed to assign ticket %s, cause %d", f.TicketId, f.Cause)
|
||||
}
|
||||
for _, id := range associatedTickets {
|
||||
err = s.store.DeindexTicket(ctx, id)
|
||||
// Try to deindex all input tickets. Log without returning an error if the deindexing operation failed.
|
||||
if err != nil {
|
||||
logger.WithError(err).Errorf("failed to deindex ticket %s after updating the assignments", id)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove all tickets associated with backfill, because unassigned tickets are not found only
|
||||
err = s.store.UpdateBackfill(ctx, bf, []string{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetBackfill fetches a Backfill object by its ID.
|
||||
func (s *frontendService) GetBackfill(ctx context.Context, req *pb.GetBackfillRequest) (*pb.Backfill, error) {
|
||||
bf, _, err := s.store.GetBackfill(ctx, req.GetBackfillId())
|
||||
return bf, err
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
@ -81,13 +82,190 @@ func TestDoCreateTickets(t *testing.T) {
|
||||
if err == nil {
|
||||
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetId())
|
||||
require.True(t, matched)
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateBackfill(t *testing.T) {
|
||||
cfg := viper.New()
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
fs := frontendService{cfg, store}
|
||||
var testCases = []struct {
|
||||
description string
|
||||
request *pb.CreateBackfillRequest
|
||||
result *pb.Backfill
|
||||
expectedCode codes.Code
|
||||
expectedMessage string
|
||||
}{
|
||||
{
|
||||
description: "nil request check",
|
||||
request: nil,
|
||||
expectedCode: codes.InvalidArgument,
|
||||
expectedMessage: "request is nil",
|
||||
},
|
||||
{
|
||||
description: "nil backfill - error is returned",
|
||||
request: &pb.CreateBackfillRequest{Backfill: nil},
|
||||
expectedCode: codes.InvalidArgument,
|
||||
expectedMessage: ".backfill is required",
|
||||
},
|
||||
{
|
||||
description: "createTime should not exist in input",
|
||||
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{CreateTime: ptypes.TimestampNow()}},
|
||||
expectedCode: codes.InvalidArgument,
|
||||
expectedMessage: "backfills cannot be created with create time set",
|
||||
},
|
||||
{
|
||||
description: "empty Backfill, no errors",
|
||||
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{}},
|
||||
expectedCode: codes.OK,
|
||||
expectedMessage: "",
|
||||
},
|
||||
{
|
||||
description: "normal backfill",
|
||||
request: &pb.CreateBackfillRequest{
|
||||
Backfill: &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
}}}},
|
||||
expectedCode: codes.OK,
|
||||
expectedMessage: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
res, err := fs.CreateBackfill(ctx, tc.request)
|
||||
if tc.expectedCode == codes.OK {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// expect error with canceled context
|
||||
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
fs = frontendService{cfg, store}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
||||
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"test-arg": 1,
|
||||
},
|
||||
},
|
||||
}})
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Nil(t, res)
|
||||
}
|
||||
|
||||
func TestUpdateBackfill(t *testing.T) {
|
||||
cfg := viper.New()
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
fs := frontendService{cfg, store}
|
||||
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{
|
||||
Backfill: &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
|
||||
var testCases = []struct {
|
||||
description string
|
||||
request *pb.UpdateBackfillRequest
|
||||
result *pb.Backfill
|
||||
expectedCode codes.Code
|
||||
expectedMessage string
|
||||
}{
|
||||
{
|
||||
description: "nil request check",
|
||||
request: nil,
|
||||
expectedCode: codes.InvalidArgument,
|
||||
expectedMessage: "request is nil",
|
||||
},
|
||||
{
|
||||
description: "nil backfill - error is returned",
|
||||
request: &pb.UpdateBackfillRequest{Backfill: nil},
|
||||
expectedCode: codes.InvalidArgument,
|
||||
expectedMessage: ".backfill is required",
|
||||
},
|
||||
{
|
||||
description: "empty Backfill, error with no backfill ID",
|
||||
request: &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{}},
|
||||
expectedCode: codes.InvalidArgument,
|
||||
expectedMessage: "backfill ID should exist",
|
||||
},
|
||||
{
|
||||
description: "normal backfill",
|
||||
request: &pb.UpdateBackfillRequest{
|
||||
Backfill: &pb.Backfill{
|
||||
Id: res.Id,
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
}}}},
|
||||
expectedCode: codes.OK,
|
||||
expectedMessage: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
res, err = fs.UpdateBackfill(ctx, tc.request)
|
||||
if tc.expectedCode == codes.OK {
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.Equal(t, tc.request.Backfill.SearchFields.DoubleArgs, res.SearchFields.DoubleArgs)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// expect error with canceled context
|
||||
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
fs = frontendService{cfg, store}
|
||||
defer closer()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
||||
res, err = fs.UpdateBackfill(ctx, &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{
|
||||
Id: res.Id,
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"test-arg": 1,
|
||||
},
|
||||
},
|
||||
}})
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, codes.Unknown.String(), status.Convert(err).Code().String())
|
||||
require.Nil(t, res)
|
||||
}
|
||||
|
||||
func TestDoWatchAssignments(t *testing.T) {
|
||||
testTicket := &pb.Ticket{
|
||||
Id: "test-id",
|
||||
@ -131,7 +309,7 @@ func TestDoWatchAssignments(t *testing.T) {
|
||||
},
|
||||
},
|
||||
})
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
wg.Done()
|
||||
}
|
||||
}(wg)
|
||||
@ -165,6 +343,89 @@ func TestDoWatchAssignments(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestAcknowledgeBackfillValidation - test input validation only
|
||||
func TestAcknowledgeBackfillValidation(t *testing.T) {
|
||||
cfg := viper.New()
|
||||
tests := []struct {
|
||||
description string
|
||||
request *pb.AcknowledgeBackfillRequest
|
||||
expectedMessage string
|
||||
}{
|
||||
{
|
||||
description: "no BackfillId, error is expected",
|
||||
request: &pb.AcknowledgeBackfillRequest{BackfillId: "", Assignment: &pb.Assignment{Connection: "10.0.0.1"}},
|
||||
expectedMessage: ".BackfillId is required",
|
||||
},
|
||||
{
|
||||
description: "no Assignment, error is expected",
|
||||
request: &pb.AcknowledgeBackfillRequest{BackfillId: "1234", Assignment: nil},
|
||||
expectedMessage: ".Assignment is required",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
fs := frontendService{cfg, store}
|
||||
bf, err := fs.AcknowledgeBackfill(ctx, test.request)
|
||||
require.Equal(t, codes.InvalidArgument.String(), status.Convert(err).Code().String())
|
||||
require.Equal(t, test.expectedMessage, status.Convert(err).Message())
|
||||
require.Nil(t, bf)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAcknowledgeBackfill verifies timestamp part of AcknowledgeBackfill call,
// assignment part tested in a corresponding E2E test.
// Expired backfill can not be acknowledged
func TestAcknowledgeBackfill(t *testing.T) {
	cfg := viper.New()
	ctx := context.Background()

	store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
	defer closer()

	// Backfill stored directly in the state store so it exists before the
	// first AcknowledgeBackfill call.
	fakeBackfill := &pb.Backfill{
		Id: "1",
		SearchFields: &pb.SearchFields{
			DoubleArgs: map[string]float64{
				"test-arg": 1,
			},
		},
	}
	err := store.CreateBackfill(ctx, fakeBackfill, []string{})
	require.NoError(t, err)
	fs := frontendService{cfg, store}

	// Happy path: acknowledging a fresh backfill returns both the backfill
	// and its associated tickets.
	resp, err := fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
	require.NoError(t, err)
	require.NotNil(t, resp)
	require.NotNil(t, resp.Backfill)
	require.NotNil(t, resp.Tickets)

	// Use wrong BackfillID, error is returned
	resp, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: "42", Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
	require.Error(t, err)
	require.Nil(t, resp)
	require.Equal(t, "Backfill id: 42 not found", status.Convert(err).Message())

	// Let the backfill's acknowledgment timestamp age past the configured
	// pendingReleaseTimeout so the store considers it expired.
	time.Sleep(cfg.GetDuration("pendingReleaseTimeout"))
	ids, err := store.GetExpiredBackfillIDs(ctx)
	require.NoError(t, err)
	require.Len(t, ids, 1)

	// An expired backfill must be rejected with Unavailable.
	resp, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
	require.Nil(t, resp)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "can not acknowledge an expired backfill, id: 1")

}
|
||||
|
||||
func TestDoDeleteTicket(t *testing.T) {
|
||||
fakeTicket := &pb.Ticket{
|
||||
Id: "1",
|
||||
@ -274,3 +535,109 @@ func TestDoGetTicket(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBackfill(t *testing.T) {
|
||||
fakeBackfill := &pb.Backfill{
|
||||
Id: "1",
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"test-arg": 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
cfg := viper.New()
|
||||
tests := []struct {
|
||||
description string
|
||||
preAction func(context.Context, context.CancelFunc, statestore.Service)
|
||||
wantTicket *pb.Backfill
|
||||
wantCode codes.Code
|
||||
}{
|
||||
{
|
||||
description: "expect unavailable code since context is canceled before being called",
|
||||
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
|
||||
cancel()
|
||||
},
|
||||
wantCode: codes.Unavailable,
|
||||
},
|
||||
{
|
||||
description: "expect not found code since ticket does not exist",
|
||||
preAction: func(_ context.Context, _ context.CancelFunc, _ statestore.Service) {},
|
||||
wantCode: codes.NotFound,
|
||||
},
|
||||
{
|
||||
description: "expect ok code with output ticket equivalent to fakeBackfill",
|
||||
preAction: func(ctx context.Context, _ context.CancelFunc, store statestore.Service) {
|
||||
store.CreateBackfill(ctx, fakeBackfill, []string{})
|
||||
},
|
||||
wantCode: codes.OK,
|
||||
wantTicket: fakeBackfill,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
|
||||
defer closer()
|
||||
fs := frontendService{cfg, store}
|
||||
|
||||
test.preAction(ctx, cancel, store)
|
||||
|
||||
backfill, err := fs.GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: fakeBackfill.GetId()})
|
||||
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
|
||||
|
||||
if err == nil {
|
||||
require.Equal(t, test.wantTicket.GetId(), backfill.GetId())
|
||||
require.Equal(t, test.wantTicket.SearchFields.DoubleArgs, backfill.SearchFields.DoubleArgs)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoDeleteBackfill(t *testing.T) {
|
||||
fakeBackfill := &pb.Backfill{
|
||||
Id: "1",
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"test-arg": 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
|
||||
defer closer()
|
||||
ctx := context.Background()
|
||||
|
||||
err := store.CreateBackfill(ctx, fakeBackfill, []string{})
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := viper.New()
|
||||
fs := frontendService{cfg, store}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
id string
|
||||
wantCode codes.Code
|
||||
}{
|
||||
{
|
||||
description: "expect ok code since delete backfill does not care about if backfill exists or not",
|
||||
id: "222",
|
||||
wantCode: codes.OK,
|
||||
},
|
||||
{
|
||||
description: "expect ok code",
|
||||
id: "1",
|
||||
wantCode: codes.OK,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
_, err := fs.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: fakeBackfill.GetId()})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
250
internal/app/query/cache.go
Normal file
250
internal/app/query/cache.go
Normal file
@ -0,0 +1,250 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
// cache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type cache struct {
	// store is the state-storage backend the cache is refreshed from.
	store statestore.Service

	// requests receives callers that want a fresh view of the cache.
	requests chan *cacheRequest

	// Single item buffered channel. Holds a value when runQuery can be safely
	// started. Basically a channel/select friendly mutex around runQuery
	// running.
	startRunRequest chan struct{}

	// wg counts callers currently reading value; runRequest waits on it
	// before allowing the next update pass.
	wg sync.WaitGroup

	// Multithreaded unsafe fields, only to be written by update, and read when
	// request given the ok.
	value  interface{}
	update func(statestore.Service, interface{}) error
	err    error
}

// cacheRequest represents one caller waiting for cache access: runNow is
// signaled once the update pass has finished and the caller may read value.
type cacheRequest struct {
	ctx    context.Context
	runNow chan struct{}
}
|
||||
|
||||
// request gives f a synchronous, read-only turn on the cache's value after an
// update pass has run. It blocks until the update completes or ctx is
// canceled, and returns the update's error, ctx's error, or nil once f has
// been invoked.
func (c *cache) request(ctx context.Context, f func(interface{})) error {
	cr := &cacheRequest{
		ctx:    ctx,
		runNow: make(chan struct{}),
	}

sendRequest:
	for {
		select {
		case <-ctx.Done():
			return errors.Wrap(ctx.Err(), "cache request canceled before request sent.")
		case <-c.startRunRequest:
			// Won the start token: launch the updater that will serve this
			// (and any other concurrently waiting) request.
			go c.runRequest()
		case c.requests <- cr:
			break sendRequest
		}
	}

	select {
	case <-ctx.Done():
		return errors.Wrap(ctx.Err(), "cache request canceled waiting for access.")
	case <-cr.runNow:
		// runRequest added this caller to wg before signaling runNow;
		// release the slot when f is done with the value.
		defer c.wg.Done()
	}

	if c.err != nil {
		return c.err
	}

	f(c.value)
	return nil
}
|
||||
|
||||
// runRequest performs one cache update pass: it batches every currently
// waiting request, runs the update function once, lets all batched callers
// read the refreshed value, then returns the start token so a subsequent
// pass may begin.
func (c *cache) runRequest() {
	defer func() {
		// Hand the start token back so the next pass can be started.
		c.startRunRequest <- struct{}{}
	}()

	// Wait for first query request.
	reqs := []*cacheRequest{<-c.requests}

	// Collect all waiting queries.
collectAllWaiting:
	for {
		select {
		case req := <-c.requests:
			reqs = append(reqs, req)
		default:
			break collectAllWaiting
		}
	}

	c.err = c.update(c.store, c.value)
	stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))

	// Send WaitGroup to query calls, letting them run their query on the cache.
	for _, req := range reqs {
		c.wg.Add(1)
		select {
		case req.runNow <- struct{}{}:
		case <-req.ctx.Done():
			// The caller gave up waiting; release its wg slot ourselves.
			c.wg.Done()
		}
	}

	// wait for requests to finish using cache.
	c.wg.Wait()
}
|
||||
|
||||
func newTicketCache(b *appmain.Bindings, store statestore.Service) *cache {
|
||||
c := &cache{
|
||||
store: store,
|
||||
requests: make(chan *cacheRequest),
|
||||
startRunRequest: make(chan struct{}, 1),
|
||||
value: make(map[string]*pb.Ticket),
|
||||
update: updateTicketCache,
|
||||
}
|
||||
|
||||
c.startRunRequest <- struct{}{}
|
||||
b.AddHealthCheckFunc(c.store.HealthCheck)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func updateTicketCache(store statestore.Service, value interface{}) error {
|
||||
if value == nil {
|
||||
return status.Error(codes.InvalidArgument, "value is required")
|
||||
}
|
||||
|
||||
tickets, ok := value.(map[string]*pb.Ticket)
|
||||
if !ok {
|
||||
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Ticket, but got: %T", value)
|
||||
}
|
||||
|
||||
t := time.Now()
|
||||
previousCount := len(tickets)
|
||||
currentAll, err := store.GetIndexedIDSet(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
deletedCount := 0
|
||||
for id := range tickets {
|
||||
if _, ok := currentAll[id]; !ok {
|
||||
delete(tickets, id)
|
||||
deletedCount++
|
||||
}
|
||||
}
|
||||
|
||||
toFetch := []string{}
|
||||
for id := range currentAll {
|
||||
if _, ok := tickets[id]; !ok {
|
||||
toFetch = append(toFetch, id)
|
||||
}
|
||||
}
|
||||
|
||||
newTickets, err := store.GetTickets(context.Background(), toFetch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, t := range newTickets {
|
||||
tickets[t.Id] = t
|
||||
}
|
||||
|
||||
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
|
||||
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
|
||||
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
|
||||
|
||||
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tickets))
|
||||
return nil
|
||||
}
|
||||
|
||||
func newBackfillCache(b *appmain.Bindings, store statestore.Service) *cache {
|
||||
c := &cache{
|
||||
store: store,
|
||||
requests: make(chan *cacheRequest),
|
||||
startRunRequest: make(chan struct{}, 1),
|
||||
value: make(map[string]*pb.Backfill),
|
||||
update: updateBackfillCache,
|
||||
}
|
||||
|
||||
c.startRunRequest <- struct{}{}
|
||||
b.AddHealthCheckFunc(c.store.HealthCheck)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func updateBackfillCache(store statestore.Service, value interface{}) error {
|
||||
if value == nil {
|
||||
return status.Error(codes.InvalidArgument, "value is required")
|
||||
}
|
||||
|
||||
backfills, ok := value.(map[string]*pb.Backfill)
|
||||
if !ok {
|
||||
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Backfill, but got: %T", value)
|
||||
}
|
||||
|
||||
t := time.Now()
|
||||
previousCount := len(backfills)
|
||||
index, err := store.GetIndexedBackfills(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
deletedCount := 0
|
||||
for id, backfill := range backfills {
|
||||
generation, ok := index[id]
|
||||
if !ok || backfill.Generation < int64(generation) {
|
||||
delete(backfills, id)
|
||||
deletedCount++
|
||||
}
|
||||
}
|
||||
|
||||
toFetch := []string{}
|
||||
for id := range index {
|
||||
if _, ok := backfills[id]; !ok {
|
||||
toFetch = append(toFetch, id)
|
||||
}
|
||||
}
|
||||
|
||||
fetchedBackfills, err := store.GetBackfills(context.Background(), toFetch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, b := range fetchedBackfills {
|
||||
backfills[b.Id] = b
|
||||
}
|
||||
|
||||
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
|
||||
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
|
||||
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
|
||||
|
||||
logger.Debugf("Backfill Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(backfills))
|
||||
return nil
|
||||
}
|
@ -19,13 +19,15 @@ import (
|
||||
"go.opencensus.io/stats/view"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
|
||||
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of tickets query service cached", stats.UnitDimensionless)
|
||||
backfillsPerQuery = stats.Int64("open-match.dev/query/backfills_per_query", "Number of backfills per query", stats.UnitDimensionless)
|
||||
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of items query service cached", stats.UnitDimensionless)
|
||||
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
|
||||
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
|
||||
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
|
||||
@ -36,10 +38,16 @@ var (
|
||||
Description: "Tickets per query",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
backfillsPerQueryView = &view.View{
|
||||
Measure: ticketsPerQuery,
|
||||
Name: "open-match.dev/query/backfills_per_query",
|
||||
Description: "Backfills per query",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
cacheTotalItemsView = &view.View{
|
||||
Measure: cacheTotalItems,
|
||||
Name: "open-match.dev/query/total_cached_items",
|
||||
Description: "Total number of cached tickets",
|
||||
Description: "Total number of cached items",
|
||||
Aggregation: view.LastValue(),
|
||||
}
|
||||
cacheFetchedItemsView = &view.View{
|
||||
@ -70,9 +78,11 @@ var (
|
||||
|
||||
// BindService creates the query service and binds it to the serving harness.
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
store := statestore.New(p.Config())
|
||||
service := &queryService{
|
||||
cfg: p.Config(),
|
||||
tc: newTicketCache(b, p.Config()),
|
||||
tc: newTicketCache(b, store),
|
||||
bc: newBackfillCache(b, store),
|
||||
}
|
||||
|
||||
b.AddHandleFunc(func(s *grpc.Server) {
|
||||
@ -80,6 +90,7 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
}, pb.RegisterQueryServiceHandlerFromEndpoint)
|
||||
b.RegisterViews(
|
||||
ticketsPerQueryView,
|
||||
backfillsPerQueryView,
|
||||
cacheTotalItemsView,
|
||||
cacheUpdateView,
|
||||
cacheFetchedItemsView,
|
||||
|
@ -15,20 +15,14 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/filter"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
@ -40,10 +34,11 @@ var (
|
||||
)
|
||||
|
||||
// queryService API provides utility functions for common MMF functionality such
|
||||
// as retreiving Tickets from state storage.
|
||||
// as retrieving Tickets from state storage.
|
||||
type queryService struct {
|
||||
cfg config.View
|
||||
tc *ticketCache
|
||||
tc *cache
|
||||
bc *cache
|
||||
}
|
||||
|
||||
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
|
||||
@ -59,7 +54,13 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
|
||||
}
|
||||
|
||||
var results []*pb.Ticket
|
||||
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
|
||||
err = s.tc.request(ctx, func(value interface{}) {
|
||||
tickets, ok := value.(map[string]*pb.Ticket)
|
||||
if !ok {
|
||||
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
|
||||
return
|
||||
}
|
||||
|
||||
for _, ticket := range tickets {
|
||||
if pf.In(ticket) {
|
||||
results = append(results, ticket)
|
||||
@ -103,7 +104,13 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
|
||||
}
|
||||
|
||||
var results []string
|
||||
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
|
||||
err = s.tc.request(ctx, func(value interface{}) {
|
||||
tickets, ok := value.(map[string]*pb.Ticket)
|
||||
if !ok {
|
||||
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
|
||||
return
|
||||
}
|
||||
|
||||
for id, ticket := range tickets {
|
||||
if pf.In(ticket) {
|
||||
results = append(results, id)
|
||||
@ -134,6 +141,56 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *queryService) QueryBackfills(req *pb.QueryBackfillsRequest, responseServer pb.QueryService_QueryBackfillsServer) error {
|
||||
ctx := responseServer.Context()
|
||||
pool := req.GetPool()
|
||||
if pool == nil {
|
||||
return status.Error(codes.InvalidArgument, ".pool is required")
|
||||
}
|
||||
|
||||
pf, err := filter.NewPoolFilter(pool)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var results []*pb.Backfill
|
||||
err = s.bc.request(ctx, func(value interface{}) {
|
||||
backfills, ok := value.(map[string]*pb.Backfill)
|
||||
if !ok {
|
||||
logger.Errorf("expecting value type map[string]*pb.Backfill, but got: %T", value)
|
||||
return
|
||||
}
|
||||
|
||||
for _, backfill := range backfills {
|
||||
if pf.In(backfill) {
|
||||
results = append(results, backfill)
|
||||
}
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "QueryBackfills: failed to run request")
|
||||
return err
|
||||
}
|
||||
stats.Record(ctx, backfillsPerQuery.M(int64(len(results))))
|
||||
|
||||
pSize := getPageSize(s.cfg)
|
||||
for start := 0; start < len(results); start += pSize {
|
||||
end := start + pSize
|
||||
if end > len(results) {
|
||||
end = len(results)
|
||||
}
|
||||
|
||||
err := responseServer.Send(&pb.QueryBackfillsResponse{
|
||||
Backfills: results[start:end],
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPageSize(cfg config.View) int {
|
||||
const (
|
||||
name = "queryPageSize"
|
||||
@ -165,159 +222,3 @@ func getPageSize(cfg config.View) int {
|
||||
|
||||
return pSize
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
// ticketCache unifies concurrent requests into a single cache update, and
|
||||
// gives a safe view into that map cache.
|
||||
type ticketCache struct {
|
||||
store statestore.Service
|
||||
|
||||
requests chan *cacheRequest
|
||||
|
||||
// Single item buffered channel. Holds a value when runQuery can be safely
|
||||
// started. Basically a channel/select friendly mutex around runQuery
|
||||
// running.
|
||||
startRunRequest chan struct{}
|
||||
|
||||
wg sync.WaitGroup
|
||||
|
||||
// Mutlithreaded unsafe fields, only to be written by update, and read when
|
||||
// request given the ok.
|
||||
tickets map[string]*pb.Ticket
|
||||
err error
|
||||
}
|
||||
|
||||
func newTicketCache(b *appmain.Bindings, cfg config.View) *ticketCache {
|
||||
tc := &ticketCache{
|
||||
store: statestore.New(cfg),
|
||||
requests: make(chan *cacheRequest),
|
||||
startRunRequest: make(chan struct{}, 1),
|
||||
tickets: make(map[string]*pb.Ticket),
|
||||
}
|
||||
|
||||
tc.startRunRequest <- struct{}{}
|
||||
b.AddHealthCheckFunc(tc.store.HealthCheck)
|
||||
|
||||
return tc
|
||||
}
|
||||
|
||||
type cacheRequest struct {
|
||||
ctx context.Context
|
||||
runNow chan struct{}
|
||||
}
|
||||
|
||||
func (tc *ticketCache) request(ctx context.Context, f func(map[string]*pb.Ticket)) error {
|
||||
cr := &cacheRequest{
|
||||
ctx: ctx,
|
||||
runNow: make(chan struct{}),
|
||||
}
|
||||
|
||||
sendRequest:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "ticket cache request canceled before reuest sent.")
|
||||
case <-tc.startRunRequest:
|
||||
go tc.runRequest()
|
||||
case tc.requests <- cr:
|
||||
break sendRequest
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "ticket cache request canceled waiting for access.")
|
||||
case <-cr.runNow:
|
||||
defer tc.wg.Done()
|
||||
}
|
||||
|
||||
if tc.err != nil {
|
||||
return tc.err
|
||||
}
|
||||
|
||||
f(tc.tickets)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *ticketCache) runRequest() {
|
||||
defer func() {
|
||||
tc.startRunRequest <- struct{}{}
|
||||
}()
|
||||
|
||||
// Wait for first query request.
|
||||
reqs := []*cacheRequest{<-tc.requests}
|
||||
|
||||
// Collect all waiting queries.
|
||||
collectAllWaiting:
|
||||
for {
|
||||
select {
|
||||
case req := <-tc.requests:
|
||||
reqs = append(reqs, req)
|
||||
default:
|
||||
break collectAllWaiting
|
||||
}
|
||||
}
|
||||
|
||||
tc.update()
|
||||
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
|
||||
|
||||
// Send WaitGroup to query calls, letting them run their query on the ticket
|
||||
// cache.
|
||||
for _, req := range reqs {
|
||||
tc.wg.Add(1)
|
||||
select {
|
||||
case req.runNow <- struct{}{}:
|
||||
case <-req.ctx.Done():
|
||||
tc.wg.Done()
|
||||
}
|
||||
}
|
||||
|
||||
// wait for requests to finish using ticket cache.
|
||||
tc.wg.Wait()
|
||||
}
|
||||
|
||||
func (tc *ticketCache) update() {
|
||||
st := time.Now()
|
||||
previousCount := len(tc.tickets)
|
||||
|
||||
currentAll, err := tc.store.GetIndexedIDSet(context.Background())
|
||||
if err != nil {
|
||||
tc.err = err
|
||||
return
|
||||
}
|
||||
|
||||
deletedCount := 0
|
||||
for id := range tc.tickets {
|
||||
if _, ok := currentAll[id]; !ok {
|
||||
delete(tc.tickets, id)
|
||||
deletedCount++
|
||||
}
|
||||
}
|
||||
|
||||
toFetch := []string{}
|
||||
|
||||
for id := range currentAll {
|
||||
if _, ok := tc.tickets[id]; !ok {
|
||||
toFetch = append(toFetch, id)
|
||||
}
|
||||
}
|
||||
|
||||
newTickets, err := tc.store.GetTickets(context.Background(), toFetch)
|
||||
if err != nil {
|
||||
tc.err = err
|
||||
return
|
||||
}
|
||||
|
||||
for _, t := range newTickets {
|
||||
tc.tickets[t.Id] = t
|
||||
}
|
||||
|
||||
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
|
||||
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
|
||||
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(st))/float64(time.Millisecond)))
|
||||
|
||||
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
|
||||
tc.err = nil
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ var (
|
||||
// -> m2c ->
|
||||
// remember return channel m7c for match | fanInFanOut
|
||||
// -> m3c ->
|
||||
// setmappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
|
||||
// set mappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
|
||||
// -> m4c -> (buffered)
|
||||
// send to evaluator | wrapEvaluator
|
||||
// -> m5c -> (buffered)
|
||||
@ -113,7 +113,7 @@ func (s *synchronizerService) Synchronize(stream ipb.Synchronizer_SynchronizeSer
|
||||
registration.allM1cSent.Done()
|
||||
return
|
||||
}
|
||||
registration.m1c.send(mAndM6c{m: req.Proposal, m7c: registration.m7c})
|
||||
registration.m1c.send(mAndM7c{m: req.Proposal, m7c: registration.m7c})
|
||||
}
|
||||
}()
|
||||
|
||||
@ -212,7 +212,7 @@ func (s *synchronizerService) runCycle() {
|
||||
/////////////////////////////////////// Initialize cycle
|
||||
ctx, cancel := contextcause.WithCancelCause(context.Background())
|
||||
|
||||
m2c := make(chan mAndM6c)
|
||||
m2c := make(chan mAndM7c)
|
||||
m3c := make(chan *pb.Match)
|
||||
m4c := make(chan *pb.Match)
|
||||
m5c := make(chan string)
|
||||
@ -289,17 +289,24 @@ Registration:
|
||||
r.cancelMmfs <- struct{}{}
|
||||
}
|
||||
})
|
||||
|
||||
<-closedOnCycleEnd
|
||||
|
||||
stats.Record(ctx, iterationLatency.M(float64(time.Since(cst)/time.Millisecond)))
|
||||
|
||||
// Clean up in case it was never needed.
|
||||
cancelProposalCollection.Stop()
|
||||
|
||||
err := s.store.CleanupBackfills(ctx)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to clean up backfills, %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////
|
||||
///////////////////////////////////////
|
||||
|
||||
type mAndM6c struct {
|
||||
type mAndM7c struct {
|
||||
m *pb.Match
|
||||
m7c chan string
|
||||
}
|
||||
@ -309,10 +316,10 @@ type mAndM6c struct {
|
||||
// This channel is remembered in a map, and the match is passed to be evaluated.
|
||||
// When a match returns from evaluation, it's ID is looked up in the map and the
|
||||
// match is returned on that channel.
|
||||
func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
|
||||
m6cMap := make(map[string]chan<- string)
|
||||
func fanInFanOut(m2c <-chan mAndM7c, m3c chan<- *pb.Match, m6c <-chan string) {
|
||||
m7cMap := make(map[string]chan<- string)
|
||||
|
||||
defer func(m2c <-chan mAndM6c) {
|
||||
defer func(m2c <-chan mAndM7c) {
|
||||
for range m2c {
|
||||
}
|
||||
}(m2c)
|
||||
@ -321,7 +328,7 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
|
||||
select {
|
||||
case m2, ok := <-m2c:
|
||||
if ok {
|
||||
m6cMap[m2.m.GetMatchId()] = m2.m7c
|
||||
m7cMap[m2.m.GetMatchId()] = m2.m7c
|
||||
m3c <- m2.m
|
||||
} else {
|
||||
close(m3c)
|
||||
@ -334,7 +341,7 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
|
||||
return
|
||||
}
|
||||
|
||||
m7c, ok := m6cMap[m5]
|
||||
m7c, ok := m7cMap[m5]
|
||||
if ok {
|
||||
m7c <- m5
|
||||
} else {
|
||||
@ -350,8 +357,8 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
|
||||
///////////////////////////////////////
|
||||
|
||||
type cutoffSender struct {
|
||||
m1c chan<- mAndM6c
|
||||
m2c chan<- mAndM6c
|
||||
m1c chan<- mAndM7c
|
||||
m2c chan<- mAndM7c
|
||||
closed chan struct{}
|
||||
closeOnce sync.Once
|
||||
}
|
||||
@ -359,8 +366,8 @@ type cutoffSender struct {
|
||||
// cutoffSender allows values to be passed on the provided channel until cutoff
|
||||
// has been called. This closed the provided channel. Calls to send after
|
||||
// cutoff work, but values are ignored.
|
||||
func newCutoffSender(m2c chan<- mAndM6c) *cutoffSender {
|
||||
m1c := make(chan mAndM6c)
|
||||
func newCutoffSender(m2c chan<- mAndM7c) *cutoffSender {
|
||||
m1c := make(chan mAndM7c)
|
||||
c := &cutoffSender{
|
||||
m1c: m1c,
|
||||
m2c: m2c,
|
||||
@ -383,7 +390,7 @@ func newCutoffSender(m2c chan<- mAndM6c) *cutoffSender {
|
||||
}
|
||||
|
||||
// send passes the value on the channel if still open, otherwise does nothing.
|
||||
func (c *cutoffSender) send(match mAndM6c) {
|
||||
func (c *cutoffSender) send(match mAndM7c) {
|
||||
select {
|
||||
case <-c.closed:
|
||||
case c.m1c <- match:
|
||||
@ -436,7 +443,7 @@ func getTicketIds(tickets []*pb.Ticket) []string {
|
||||
|
||||
// Calls statestore to add all of the tickets returned by the evaluator to the
|
||||
// pendingRelease list. If it partially fails for whatever reason (not all tickets will
|
||||
// nessisarily be in the same call), only the matches which can be safely
|
||||
// necessarily be in the same call), only the matches which can be safely
|
||||
// returned to the Synchronize calls are.
|
||||
func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
|
||||
totalMatches := 0
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build !race
|
||||
// +build !race
|
||||
|
||||
// Copyright 2019 Google LLC
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@ -72,16 +73,23 @@ func NewPoolFilter(pool *pb.Pool) (*PoolFilter, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
type filteredEntity interface {
|
||||
GetId() string
|
||||
GetSearchFields() *pb.SearchFields
|
||||
GetCreateTime() *timestamp.Timestamp
|
||||
}
|
||||
|
||||
// In returns true if the Ticket meets all the criteria for this PoolFilter.
|
||||
func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
|
||||
s := ticket.GetSearchFields()
|
||||
func (pf *PoolFilter) In(entity filteredEntity) bool {
|
||||
s := entity.GetSearchFields()
|
||||
|
||||
if s == nil {
|
||||
s = emptySearchFields
|
||||
}
|
||||
|
||||
if !pf.CreatedAfter.IsZero() || !pf.CreatedBefore.IsZero() {
|
||||
// CreateTime is only populated by Open Match and hence expected to be valid.
|
||||
if ct, err := ptypes.Timestamp(ticket.CreateTime); err == nil {
|
||||
if ct, err := ptypes.Timestamp(entity.GetCreateTime()); err == nil {
|
||||
if !pf.CreatedAfter.IsZero() {
|
||||
if !ct.After(pf.CreatedAfter) {
|
||||
return false
|
||||
@ -96,7 +104,7 @@ func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
|
||||
} else {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
"id": ticket.GetId(),
|
||||
"id": entity.GetId(),
|
||||
}).Error("failed to get time from Timestamp proto")
|
||||
}
|
||||
}
|
||||
@ -106,10 +114,27 @@ func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// Not simplified so that NaN cases are handled correctly.
|
||||
if !(v >= f.Min && v <= f.Max) {
|
||||
return false
|
||||
|
||||
switch f.Exclude {
|
||||
case pb.DoubleRangeFilter_NONE:
|
||||
// Not simplified so that NaN cases are handled correctly.
|
||||
if !(v >= f.Min && v <= f.Max) {
|
||||
return false
|
||||
}
|
||||
case pb.DoubleRangeFilter_MIN:
|
||||
if !(v > f.Min && v <= f.Max) {
|
||||
return false
|
||||
}
|
||||
case pb.DoubleRangeFilter_MAX:
|
||||
if !(v >= f.Min && v < f.Max) {
|
||||
return false
|
||||
}
|
||||
case pb.DoubleRangeFilter_BOTH:
|
||||
if !(v > f.Min && v < f.Max) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for _, f := range pf.StringEqualsFilters {
|
||||
|
@ -27,33 +27,53 @@ import (
|
||||
)
|
||||
|
||||
func TestMeetsCriteria(t *testing.T) {
|
||||
testInclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
|
||||
pf, err := NewPoolFilter(pool)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pf)
|
||||
|
||||
if !pf.In(entity) {
|
||||
t.Error("entity should be included in the pool")
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range testcases.IncludedTestCases() {
|
||||
tc := tc
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
pf, err := NewPoolFilter(tc.Pool)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pf)
|
||||
|
||||
tc.Ticket.CreateTime = ptypes.TimestampNow()
|
||||
if !pf.In(tc.Ticket) {
|
||||
t.Error("ticket should be included in the pool")
|
||||
}
|
||||
testInclusion(t, tc.Pool, &pb.Ticket{
|
||||
SearchFields: tc.SearchFields,
|
||||
CreateTime: ptypes.TimestampNow(),
|
||||
})
|
||||
testInclusion(t, tc.Pool, &pb.Backfill{
|
||||
SearchFields: tc.SearchFields,
|
||||
CreateTime: ptypes.TimestampNow(),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
testExclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
|
||||
pf, err := NewPoolFilter(pool)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pf)
|
||||
|
||||
if pf.In(entity) {
|
||||
t.Error("ticket should be excluded from the pool")
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range testcases.ExcludedTestCases() {
|
||||
tc := tc
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
pf, err := NewPoolFilter(tc.Pool)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pf)
|
||||
|
||||
tc.Ticket.CreateTime = ptypes.TimestampNow()
|
||||
if pf.In(tc.Ticket) {
|
||||
t.Error("ticket should be excluded from the pool")
|
||||
}
|
||||
testExclusion(t, tc.Pool, &pb.Ticket{
|
||||
SearchFields: tc.SearchFields,
|
||||
CreateTime: ptypes.TimestampNow(),
|
||||
})
|
||||
testExclusion(t, tc.Pool, &pb.Backfill{
|
||||
SearchFields: tc.SearchFields,
|
||||
CreateTime: ptypes.TimestampNow(),
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -27,9 +27,9 @@ import (
|
||||
|
||||
// TestCase defines a single filtering test case to run.
|
||||
type TestCase struct {
|
||||
Name string
|
||||
Ticket *pb.Ticket
|
||||
Pool *pb.Pool
|
||||
Name string
|
||||
SearchFields *pb.SearchFields
|
||||
Pool *pb.Pool
|
||||
}
|
||||
|
||||
// IncludedTestCases returns a list of test cases where using the given filter,
|
||||
@ -39,22 +39,38 @@ func IncludedTestCases() []TestCase {
|
||||
return []TestCase{
|
||||
{
|
||||
"no filters or fields",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{},
|
||||
},
|
||||
|
||||
simpleDoubleRange("simpleInRange", 5, 0, 10),
|
||||
simpleDoubleRange("exactMatch", 5, 5, 5),
|
||||
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1)),
|
||||
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0),
|
||||
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_NONE),
|
||||
|
||||
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MIN),
|
||||
|
||||
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MAX),
|
||||
|
||||
simpleDoubleRange("excludeNone", 0, 0, 1, pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("excludeNone", 1, 0, 1, pb.DoubleRangeFilter_NONE),
|
||||
|
||||
simpleDoubleRange("excludeMin", 1, 0, 1, pb.DoubleRangeFilter_MIN),
|
||||
|
||||
simpleDoubleRange("excludeMax", 0, 0, 1, pb.DoubleRangeFilter_MAX),
|
||||
|
||||
simpleDoubleRange("excludeBoth", 2, 0, 3, pb.DoubleRangeFilter_BOTH),
|
||||
simpleDoubleRange("excludeBoth", 1, 0, 3, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
{
|
||||
"String equals simple positive",
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
&pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -69,11 +85,9 @@ func IncludedTestCases() []TestCase {
|
||||
|
||||
{
|
||||
"TagPresent simple positive",
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
Tags: []string{
|
||||
"mytag",
|
||||
},
|
||||
&pb.SearchFields{
|
||||
Tags: []string{
|
||||
"mytag",
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -87,11 +101,9 @@ func IncludedTestCases() []TestCase {
|
||||
|
||||
{
|
||||
"TagPresent multiple all present",
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
Tags: []string{
|
||||
"A", "B", "C",
|
||||
},
|
||||
&pb.SearchFields{
|
||||
Tags: []string{
|
||||
"A", "B", "C",
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -113,21 +125,21 @@ func IncludedTestCases() []TestCase {
|
||||
|
||||
{
|
||||
"CreatedBefore simple positive",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
|
||||
},
|
||||
},
|
||||
{
|
||||
"CreatedAfter simple positive",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
|
||||
},
|
||||
},
|
||||
{
|
||||
"Between CreatedBefore and CreatedAfter positive",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
|
||||
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
|
||||
@ -135,7 +147,7 @@ func IncludedTestCases() []TestCase {
|
||||
},
|
||||
{
|
||||
"No time search criteria positive",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{},
|
||||
},
|
||||
}
|
||||
@ -148,7 +160,7 @@ func ExcludedTestCases() []TestCase {
|
||||
return []TestCase{
|
||||
{
|
||||
"DoubleRange no SearchFields",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{
|
||||
{
|
||||
@ -161,7 +173,7 @@ func ExcludedTestCases() []TestCase {
|
||||
},
|
||||
{
|
||||
"StringEquals no SearchFields",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{
|
||||
{
|
||||
@ -173,7 +185,7 @@ func ExcludedTestCases() []TestCase {
|
||||
},
|
||||
{
|
||||
"TagPresent no SearchFields",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
TagPresentFilters: []*pb.TagPresentFilter{
|
||||
{
|
||||
@ -182,14 +194,11 @@ func ExcludedTestCases() []TestCase {
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
"double range missing field",
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"otherfield": 0,
|
||||
},
|
||||
&pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"otherfield": 0,
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -203,22 +212,66 @@ func ExcludedTestCases() []TestCase {
|
||||
},
|
||||
},
|
||||
|
||||
simpleDoubleRange("valueTooLow", -1, 0, 10),
|
||||
simpleDoubleRange("valueTooHigh", 11, 0, 10),
|
||||
simpleDoubleRange("minIsNan", 5, math.NaN(), 10),
|
||||
simpleDoubleRange("maxIsNan", 5, 0, math.NaN()),
|
||||
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN()),
|
||||
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10),
|
||||
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1)),
|
||||
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN()),
|
||||
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
|
||||
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
simpleDoubleRange("valueIsMax", 1, 0, 1, pb.DoubleRangeFilter_MAX),
|
||||
simpleDoubleRange("valueIsMin", 0, 0, 1, pb.DoubleRangeFilter_MIN),
|
||||
simpleDoubleRange("excludeBoth", 0, 0, 1, pb.DoubleRangeFilter_BOTH),
|
||||
simpleDoubleRange("excludeBoth", 1, 0, 1, pb.DoubleRangeFilter_BOTH),
|
||||
|
||||
{
|
||||
"String equals simple negative", // and case sensitivity
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
&pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -233,11 +286,9 @@ func ExcludedTestCases() []TestCase {
|
||||
|
||||
{
|
||||
"String equals missing field",
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"otherfield": "othervalue",
|
||||
},
|
||||
&pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"otherfield": "othervalue",
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -252,11 +303,9 @@ func ExcludedTestCases() []TestCase {
|
||||
|
||||
{
|
||||
"TagPresent simple negative", // and case sensitivity
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
Tags: []string{
|
||||
"MYTAG",
|
||||
},
|
||||
&pb.SearchFields{
|
||||
Tags: []string{
|
||||
"MYTAG",
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -270,11 +319,9 @@ func ExcludedTestCases() []TestCase {
|
||||
|
||||
{
|
||||
"TagPresent multiple with one missing",
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
Tags: []string{
|
||||
"A", "B", "C",
|
||||
},
|
||||
&pb.SearchFields{
|
||||
Tags: []string{
|
||||
"A", "B", "C",
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -294,21 +341,21 @@ func ExcludedTestCases() []TestCase {
|
||||
|
||||
{
|
||||
"CreatedBefore simple negative",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
|
||||
},
|
||||
},
|
||||
{
|
||||
"CreatedAfter simple negative",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
|
||||
},
|
||||
},
|
||||
{
|
||||
"Created before time range negative",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
CreatedBefore: timestamp(now.Add(time.Hour * 2)),
|
||||
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
|
||||
@ -316,7 +363,7 @@ func ExcludedTestCases() []TestCase {
|
||||
},
|
||||
{
|
||||
"Created after time range negative",
|
||||
&pb.Ticket{},
|
||||
nil,
|
||||
&pb.Pool{
|
||||
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
|
||||
CreatedAfter: timestamp(now.Add(time.Hour * -2)),
|
||||
@ -329,14 +376,12 @@ func ExcludedTestCases() []TestCase {
|
||||
}
|
||||
}
|
||||
|
||||
func simpleDoubleRange(name string, value, min, max float64) TestCase {
|
||||
func simpleDoubleRange(name string, value, min, max float64, exclude pb.DoubleRangeFilter_Exclude) TestCase {
|
||||
return TestCase{
|
||||
"double range " + name,
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"field": value,
|
||||
},
|
||||
&pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"field": value,
|
||||
},
|
||||
},
|
||||
&pb.Pool{
|
||||
@ -345,6 +390,7 @@ func simpleDoubleRange(name string, value, min, max float64) TestCase {
|
||||
DoubleArg: "field",
|
||||
Min: min,
|
||||
Max: max,
|
||||
Exclude: exclude,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -369,16 +415,14 @@ func multipleFilters(doubleRange, stringEquals, tagPresent bool) TestCase {
|
||||
|
||||
return TestCase{
|
||||
fmt.Sprintf("multiplefilters: %v, %v, %v", doubleRange, stringEquals, tagPresent),
|
||||
&pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"a": a,
|
||||
},
|
||||
StringArgs: map[string]string{
|
||||
"b": b,
|
||||
},
|
||||
Tags: []string{c},
|
||||
&pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"a": a,
|
||||
},
|
||||
StringArgs: map[string]string{
|
||||
"b": b,
|
||||
},
|
||||
Tags: []string{c},
|
||||
},
|
||||
&pb.Pool{
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{
|
||||
|
177
internal/ipb/messages.pb.go
Normal file
177
internal/ipb/messages.pb.go
Normal file
@ -0,0 +1,177 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0-devel
|
||||
// protoc v3.10.1
|
||||
// source: internal/api/messages.proto
|
||||
|
||||
package ipb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
pb "open-match.dev/open-match/pkg/pb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type BackfillInternal struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Represents a backfill entity which is used to fill partially full matches
|
||||
Backfill *pb.Backfill `protobuf:"bytes,1,opt,name=backfill,proto3" json:"backfill,omitempty"`
|
||||
// List of ticket IDs associated with a current backfill
|
||||
TicketIds []string `protobuf:"bytes,2,rep,name=ticket_ids,json=ticketIds,proto3" json:"ticket_ids,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BackfillInternal) Reset() {
|
||||
*x = BackfillInternal{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_api_messages_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BackfillInternal) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BackfillInternal) ProtoMessage() {}
|
||||
|
||||
func (x *BackfillInternal) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_api_messages_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BackfillInternal.ProtoReflect.Descriptor instead.
|
||||
func (*BackfillInternal) Descriptor() ([]byte, []int) {
|
||||
return file_internal_api_messages_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *BackfillInternal) GetBackfill() *pb.Backfill {
|
||||
if x != nil {
|
||||
return x.Backfill
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *BackfillInternal) GetTicketIds() []string {
|
||||
if x != nil {
|
||||
return x.TicketIds
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_internal_api_messages_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_internal_api_messages_proto_rawDesc = []byte{
|
||||
0x0a, 0x1b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
|
||||
0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x62, 0x0a, 0x10, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c,
|
||||
0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x2f, 0x0a, 0x08, 0x62, 0x61, 0x63,
|
||||
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70,
|
||||
0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c,
|
||||
0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69,
|
||||
0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
|
||||
0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x73, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
|
||||
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
|
||||
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
|
||||
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_internal_api_messages_proto_rawDescOnce sync.Once
|
||||
file_internal_api_messages_proto_rawDescData = file_internal_api_messages_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_internal_api_messages_proto_rawDescGZIP() []byte {
|
||||
file_internal_api_messages_proto_rawDescOnce.Do(func() {
|
||||
file_internal_api_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_messages_proto_rawDescData)
|
||||
})
|
||||
return file_internal_api_messages_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_internal_api_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_internal_api_messages_proto_goTypes = []interface{}{
|
||||
(*BackfillInternal)(nil), // 0: openmatch.internal.BackfillInternal
|
||||
(*pb.Backfill)(nil), // 1: openmatch.Backfill
|
||||
}
|
||||
var file_internal_api_messages_proto_depIdxs = []int32{
|
||||
1, // 0: openmatch.internal.BackfillInternal.backfill:type_name -> openmatch.Backfill
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_internal_api_messages_proto_init() }
|
||||
func file_internal_api_messages_proto_init() {
|
||||
if File_internal_api_messages_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_internal_api_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*BackfillInternal); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_internal_api_messages_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_internal_api_messages_proto_goTypes,
|
||||
DependencyIndexes: file_internal_api_messages_proto_depIdxs,
|
||||
MessageInfos: file_internal_api_messages_proto_msgTypes,
|
||||
}.Build()
|
||||
File_internal_api_messages_proto = out.File
|
||||
file_internal_api_messages_proto_rawDesc = nil
|
||||
file_internal_api_messages_proto_goTypes = nil
|
||||
file_internal_api_messages_proto_depIdxs = nil
|
||||
}
|
@ -1,71 +1,97 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0-devel
|
||||
// protoc v3.10.1
|
||||
// source: internal/api/synchronizer.proto
|
||||
|
||||
package ipb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
pb "open-match.dev/open-match/pkg/pb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type SynchronizeRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// A match returned by an mmf.
|
||||
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SynchronizeRequest) Reset() { *m = SynchronizeRequest{} }
|
||||
func (m *SynchronizeRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SynchronizeRequest) ProtoMessage() {}
|
||||
func (x *SynchronizeRequest) Reset() {
|
||||
*x = SynchronizeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SynchronizeRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SynchronizeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SynchronizeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SynchronizeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SynchronizeRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_35ff6b85fea1c4b7, []int{0}
|
||||
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *SynchronizeRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SynchronizeRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SynchronizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SynchronizeRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SynchronizeRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SynchronizeRequest.Merge(m, src)
|
||||
}
|
||||
func (m *SynchronizeRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_SynchronizeRequest.Size(m)
|
||||
}
|
||||
func (m *SynchronizeRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SynchronizeRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SynchronizeRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *SynchronizeRequest) GetProposal() *pb.Match {
|
||||
if m != nil {
|
||||
return m.Proposal
|
||||
func (x *SynchronizeRequest) GetProposal() *pb.Match {
|
||||
if x != nil {
|
||||
return x.Proposal
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SynchronizeResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Instructs the backend call that it can start running the mmfs.
|
||||
StartMmfs bool `protobuf:"varint,1,opt,name=start_mmfs,json=startMmfs,proto3" json:"start_mmfs,omitempty"`
|
||||
// Instructs the backend call that it should cancel any RPC calls to the mmfs,
|
||||
@ -73,93 +99,181 @@ type SynchronizeResponse struct {
|
||||
CancelMmfs bool `protobuf:"varint,2,opt,name=cancel_mmfs,json=cancelMmfs,proto3" json:"cancel_mmfs,omitempty"`
|
||||
// A match ID returned by the evaluator and should be returned to the FetchMatches
|
||||
// caller.
|
||||
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SynchronizeResponse) Reset() { *m = SynchronizeResponse{} }
|
||||
func (m *SynchronizeResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SynchronizeResponse) ProtoMessage() {}
|
||||
func (x *SynchronizeResponse) Reset() {
|
||||
*x = SynchronizeResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SynchronizeResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SynchronizeResponse) ProtoMessage() {}
|
||||
|
||||
func (x *SynchronizeResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SynchronizeResponse.ProtoReflect.Descriptor instead.
|
||||
func (*SynchronizeResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_35ff6b85fea1c4b7, []int{1}
|
||||
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (m *SynchronizeResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SynchronizeResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SynchronizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SynchronizeResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SynchronizeResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SynchronizeResponse.Merge(m, src)
|
||||
}
|
||||
func (m *SynchronizeResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_SynchronizeResponse.Size(m)
|
||||
}
|
||||
func (m *SynchronizeResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SynchronizeResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SynchronizeResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *SynchronizeResponse) GetStartMmfs() bool {
|
||||
if m != nil {
|
||||
return m.StartMmfs
|
||||
func (x *SynchronizeResponse) GetStartMmfs() bool {
|
||||
if x != nil {
|
||||
return x.StartMmfs
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *SynchronizeResponse) GetCancelMmfs() bool {
|
||||
if m != nil {
|
||||
return m.CancelMmfs
|
||||
func (x *SynchronizeResponse) GetCancelMmfs() bool {
|
||||
if x != nil {
|
||||
return x.CancelMmfs
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *SynchronizeResponse) GetMatchId() string {
|
||||
if m != nil {
|
||||
return m.MatchId
|
||||
func (x *SynchronizeResponse) GetMatchId() string {
|
||||
if x != nil {
|
||||
return x.MatchId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SynchronizeRequest)(nil), "openmatch.internal.SynchronizeRequest")
|
||||
proto.RegisterType((*SynchronizeResponse)(nil), "openmatch.internal.SynchronizeResponse")
|
||||
var File_internal_api_synchronizer_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_internal_api_synchronizer_proto_rawDesc = []byte{
|
||||
0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73,
|
||||
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x12, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74,
|
||||
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x12, 0x53, 0x79, 0x6e,
|
||||
0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61,
|
||||
0x74, 0x63, 0x68, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x22, 0x76, 0x0a,
|
||||
0x13, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6d, 0x6d,
|
||||
0x66, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4d,
|
||||
0x6d, 0x66, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x6d, 0x6d,
|
||||
0x66, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c,
|
||||
0x4d, 0x6d, 0x66, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x4a,
|
||||
0x04, 0x08, 0x03, 0x10, 0x04, 0x32, 0x72, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
|
||||
0x6e, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
|
||||
0x6e, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68,
|
||||
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
|
||||
0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
|
||||
0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
|
||||
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
|
||||
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
|
||||
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("internal/api/synchronizer.proto", fileDescriptor_35ff6b85fea1c4b7) }
|
||||
var (
|
||||
file_internal_api_synchronizer_proto_rawDescOnce sync.Once
|
||||
file_internal_api_synchronizer_proto_rawDescData = file_internal_api_synchronizer_proto_rawDesc
|
||||
)
|
||||
|
||||
var fileDescriptor_35ff6b85fea1c4b7 = []byte{
|
||||
// 263 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
|
||||
0x10, 0xc5, 0x89, 0x16, 0x4d, 0x27, 0x1e, 0xca, 0x7a, 0xa9, 0x05, 0x69, 0xe9, 0xa1, 0xe6, 0xa0,
|
||||
0x1b, 0xa9, 0xdf, 0xa0, 0x37, 0x85, 0x5e, 0xe2, 0xcd, 0x4b, 0xd9, 0x24, 0x53, 0xbb, 0x90, 0xfd,
|
||||
0xe3, 0xce, 0x5a, 0xd0, 0x4f, 0x2f, 0xd9, 0xc5, 0xa6, 0xd2, 0x83, 0x97, 0x85, 0x37, 0xf3, 0xdb,
|
||||
0x37, 0xcc, 0x1b, 0x98, 0x4a, 0xed, 0xd1, 0x69, 0xd1, 0x16, 0xc2, 0xca, 0x82, 0xbe, 0x74, 0xbd,
|
||||
0x73, 0x46, 0xcb, 0x6f, 0x74, 0xdc, 0x3a, 0xe3, 0x0d, 0x63, 0xc6, 0xa2, 0x56, 0xc2, 0xd7, 0x3b,
|
||||
0xfe, 0x8b, 0x4e, 0x58, 0xc7, 0x2a, 0x24, 0x12, 0xef, 0x48, 0x91, 0x9b, 0xaf, 0x80, 0xbd, 0xf6,
|
||||
0xbf, 0x4b, 0xfc, 0xf8, 0x44, 0xf2, 0xec, 0x1e, 0x52, 0xeb, 0x8c, 0x35, 0x24, 0xda, 0x71, 0x32,
|
||||
0x4b, 0xf2, 0x6c, 0x39, 0xe2, 0xbd, 0xe1, 0xba, 0x7b, 0xcb, 0x03, 0x31, 0xdf, 0xc3, 0xf5, 0x1f,
|
||||
0x0f, 0xb2, 0x46, 0x13, 0xb2, 0x5b, 0x00, 0xf2, 0xc2, 0xf9, 0x8d, 0x52, 0x5b, 0x0a, 0x36, 0x69,
|
||||
0x39, 0x0c, 0x95, 0xb5, 0xda, 0x12, 0x9b, 0x42, 0x56, 0x0b, 0x5d, 0x63, 0x1b, 0xfb, 0x67, 0xa1,
|
||||
0x0f, 0xb1, 0x14, 0x80, 0x1b, 0x48, 0xc3, 0xbc, 0x8d, 0x6c, 0xc6, 0x83, 0x59, 0x92, 0x0f, 0xcb,
|
||||
0xcb, 0xa0, 0x9f, 0x9b, 0x97, 0x41, 0x7a, 0x3e, 0x1a, 0x2c, 0x1d, 0x5c, 0x1d, 0xcd, 0x75, 0xac,
|
||||
0x82, 0xec, 0x48, 0xb3, 0x05, 0x3f, 0xcd, 0x80, 0x9f, 0x2e, 0x3b, 0xb9, 0xfb, 0x97, 0x8b, 0x0b,
|
||||
0xe5, 0xc9, 0x63, 0xb2, 0xca, 0xdf, 0x16, 0x1d, 0xfd, 0x10, 0xf1, 0x06, 0xf7, 0x45, 0x2f, 0x8b,
|
||||
0xc3, 0x51, 0xa4, 0xad, 0xaa, 0x8b, 0x10, 0xf0, 0xd3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84,
|
||||
0xba, 0xf6, 0x41, 0xab, 0x01, 0x00, 0x00,
|
||||
func file_internal_api_synchronizer_proto_rawDescGZIP() []byte {
|
||||
file_internal_api_synchronizer_proto_rawDescOnce.Do(func() {
|
||||
file_internal_api_synchronizer_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_synchronizer_proto_rawDescData)
|
||||
})
|
||||
return file_internal_api_synchronizer_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_internal_api_synchronizer_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_internal_api_synchronizer_proto_goTypes = []interface{}{
|
||||
(*SynchronizeRequest)(nil), // 0: openmatch.internal.SynchronizeRequest
|
||||
(*SynchronizeResponse)(nil), // 1: openmatch.internal.SynchronizeResponse
|
||||
(*pb.Match)(nil), // 2: openmatch.Match
|
||||
}
|
||||
var file_internal_api_synchronizer_proto_depIdxs = []int32{
|
||||
2, // 0: openmatch.internal.SynchronizeRequest.proposal:type_name -> openmatch.Match
|
||||
0, // 1: openmatch.internal.Synchronizer.Synchronize:input_type -> openmatch.internal.SynchronizeRequest
|
||||
1, // 2: openmatch.internal.Synchronizer.Synchronize:output_type -> openmatch.internal.SynchronizeResponse
|
||||
2, // [2:3] is the sub-list for method output_type
|
||||
1, // [1:2] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_internal_api_synchronizer_proto_init() }
|
||||
func file_internal_api_synchronizer_proto_init() {
|
||||
if File_internal_api_synchronizer_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_internal_api_synchronizer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SynchronizeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_internal_api_synchronizer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SynchronizeResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_internal_api_synchronizer_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_internal_api_synchronizer_proto_goTypes,
|
||||
DependencyIndexes: file_internal_api_synchronizer_proto_depIdxs,
|
||||
MessageInfos: file_internal_api_synchronizer_proto_msgTypes,
|
||||
}.Build()
|
||||
File_internal_api_synchronizer_proto = out.File
|
||||
file_internal_api_synchronizer_proto_rawDesc = nil
|
||||
file_internal_api_synchronizer_proto_goTypes = nil
|
||||
file_internal_api_synchronizer_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// SynchronizerClient is the client API for Synchronizer service.
|
||||
//
|
||||
@ -171,10 +285,10 @@ type SynchronizerClient interface {
|
||||
}
|
||||
|
||||
type synchronizerClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewSynchronizerClient(cc *grpc.ClientConn) SynchronizerClient {
|
||||
func NewSynchronizerClient(cc grpc.ClientConnInterface) SynchronizerClient {
|
||||
return &synchronizerClient{cc}
|
||||
}
|
||||
|
||||
@ -220,7 +334,7 @@ type SynchronizerServer interface {
|
||||
type UnimplementedSynchronizerServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedSynchronizerServer) Synchronize(srv Synchronizer_SynchronizeServer) error {
|
||||
func (*UnimplementedSynchronizerServer) Synchronize(Synchronizer_SynchronizeServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Synchronize not implemented")
|
||||
}
|
||||
|
||||
|
@ -22,6 +22,9 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
@ -36,25 +39,34 @@ import (
|
||||
func TestSecureGRPCFromConfig(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true)
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
|
||||
defer closer()
|
||||
|
||||
runGrpcClientTests(t, require, cfg, rpcParams)
|
||||
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
|
||||
}
|
||||
|
||||
func TestInsecureGRPCFromConfig(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false)
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
|
||||
defer closer()
|
||||
|
||||
runGrpcClientTests(t, require, cfg, rpcParams)
|
||||
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
|
||||
}
|
||||
|
||||
func TestUnavailableGRPCFromConfig(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "badhost")
|
||||
defer closer()
|
||||
|
||||
runFailureGrpcClientTests(t, require, cfg, rpcParams, codes.Unavailable)
|
||||
}
|
||||
|
||||
func TestHTTPSFromConfig(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true)
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
|
||||
defer closer()
|
||||
|
||||
runHTTPClientTests(require, cfg, rpcParams)
|
||||
@ -63,7 +75,7 @@ func TestHTTPSFromConfig(t *testing.T) {
|
||||
func TestInsecureHTTPFromConfig(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false)
|
||||
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
|
||||
defer closer()
|
||||
|
||||
runHTTPClientTests(require, cfg, rpcParams)
|
||||
@ -96,7 +108,7 @@ func TestSanitizeHTTPAddress(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
|
||||
func setupClientConnection(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) *grpc.ClientConn {
|
||||
// Serve a fake frontend server and wait for its full start up
|
||||
ff := &shellTesting.FakeFrontend{}
|
||||
rpcParams.AddHandleFunc(func(s *grpc.Server) {
|
||||
@ -104,7 +116,9 @@ func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.Vi
|
||||
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
|
||||
|
||||
s := &Server{}
|
||||
defer s.Stop()
|
||||
t.Cleanup(func() {
|
||||
defer s.Stop()
|
||||
})
|
||||
err := s.Start(rpcParams)
|
||||
require.Nil(err)
|
||||
|
||||
@ -112,6 +126,11 @@ func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.Vi
|
||||
grpcConn, err := GRPCClientFromConfig(cfg, "test")
|
||||
require.Nil(err)
|
||||
require.NotNil(grpcConn)
|
||||
return grpcConn
|
||||
}
|
||||
|
||||
func runSuccessGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
|
||||
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
|
||||
|
||||
// Confirm the client works as expected
|
||||
ctx := utilTesting.NewContext(t)
|
||||
@ -121,6 +140,20 @@ func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.Vi
|
||||
require.NotNil(grpcResp)
|
||||
}
|
||||
|
||||
func runFailureGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams, expectedCode codes.Code) {
|
||||
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
|
||||
|
||||
// Confirm the client works as expected
|
||||
ctx := utilTesting.NewContext(t)
|
||||
feClient := pb.NewFrontendServiceClient(grpcConn)
|
||||
grpcResp, err := feClient.CreateTicket(ctx, &pb.CreateTicketRequest{})
|
||||
require.Error(err)
|
||||
require.Nil(grpcResp)
|
||||
|
||||
code := status.Code(err)
|
||||
require.Equal(expectedCode, code)
|
||||
}
|
||||
|
||||
func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
|
||||
// Serve a fake frontend server and wait for its full start up
|
||||
ff := &shellTesting.FakeFrontend{}
|
||||
@ -157,7 +190,7 @@ func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams
|
||||
}
|
||||
|
||||
// Generate a config view and optional TLS key manifests (optional) for testing
|
||||
func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions, tlsEnabled bool) (config.View, *ServerParams, func()) {
|
||||
func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions, tlsEnabled bool, host string) (config.View, *ServerParams, func()) {
|
||||
// Create netlisteners on random ports used for rpc serving
|
||||
grpcL := MustListen()
|
||||
httpL := MustListen()
|
||||
@ -165,7 +198,7 @@ func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions,
|
||||
|
||||
// Generate a config view with paths to the manifests
|
||||
cfg := viper.New()
|
||||
cfg.Set("test.hostname", "localhost")
|
||||
cfg.Set("test.hostname", host)
|
||||
cfg.Set("test.grpcport", MustGetPortNumber(grpcL))
|
||||
cfg.Set("test.httpport", MustGetPortNumber(httpL))
|
||||
|
||||
|
@ -19,9 +19,10 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
)
|
||||
|
||||
@ -37,7 +38,19 @@ type insecureServer struct {
|
||||
|
||||
func (s *insecureServer) start(params *ServerParams) error {
|
||||
s.httpMux = params.ServeMux
|
||||
s.proxyMux = runtime.NewServeMux()
|
||||
s.proxyMux = runtime.NewServeMux(
|
||||
runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
|
||||
Marshaler: &runtime.JSONPb{
|
||||
MarshalOptions: protojson.MarshalOptions{
|
||||
UseProtoNames: true,
|
||||
EmitUnpopulated: false,
|
||||
},
|
||||
UnmarshalOptions: protojson.UnmarshalOptions{
|
||||
DiscardUnknown: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
// Configure the gRPC server.
|
||||
s.grpcServer = grpc.NewServer(newGRPCServerOptions(params)...)
|
||||
|
@ -28,7 +28,7 @@ import (
|
||||
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
|
||||
grpc_tracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
|
||||
grpc_validator "github.com/grpc-ecosystem/go-grpc-middleware/validator"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/plugin/ocgrpc"
|
||||
|
@ -21,10 +21,11 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
)
|
||||
|
||||
@ -45,7 +46,19 @@ type tlsServer struct {
|
||||
|
||||
func (s *tlsServer) start(params *ServerParams) error {
|
||||
s.httpMux = params.ServeMux
|
||||
s.proxyMux = runtime.NewServeMux()
|
||||
s.proxyMux = runtime.NewServeMux(
|
||||
runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
|
||||
Marshaler: &runtime.JSONPb{
|
||||
MarshalOptions: protojson.MarshalOptions{
|
||||
UseProtoNames: true,
|
||||
EmitUnpopulated: false,
|
||||
},
|
||||
UnmarshalOptions: protojson.UnmarshalOptions{
|
||||
DiscardUnknown: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
_, grpcPort, err := net.SplitHostPort(s.grpcListener.Addr().String())
|
||||
if err != nil {
|
||||
|
469
internal/statestore/backfill.go
Normal file
469
internal/statestore/backfill.go
Normal file
@ -0,0 +1,469 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package statestore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/ipb"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
logger = logrus.WithFields(logrus.Fields{
|
||||
"app": "openmatch",
|
||||
"component": "statestore.redis",
|
||||
})
|
||||
)
|
||||
|
||||
const (
|
||||
backfillLastAckTime = "backfill_last_ack_time"
|
||||
allBackfills = "allBackfills"
|
||||
)
|
||||
|
||||
// CreateBackfill creates a new Backfill in the state storage if one doesn't exist. The xids algorithm used to create the ids ensures that they are unique with no system wide synchronization. Calling clients are forbidden from choosing an id during create. So no conflicts will occur.
|
||||
func (rb *redisBackend) CreateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "CreateBackfill, id: %s, failed to connect to redis: %v", backfill.GetId(), err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
bf := ipb.BackfillInternal{
|
||||
Backfill: backfill,
|
||||
TicketIds: ticketIDs,
|
||||
}
|
||||
|
||||
value, err := proto.Marshal(&bf)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to marshal the backfill proto, id: %s", backfill.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
res, err := redisConn.Do("SETNX", backfill.GetId(), value)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to set the value for backfill, id: %s", backfill.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
if res.(int64) == 0 {
|
||||
return status.Errorf(codes.AlreadyExists, "backfill already exists, id: %s", backfill.GetId())
|
||||
}
|
||||
|
||||
return doUpdateAcknowledgmentTimestamp(redisConn, backfill.GetId())
|
||||
}
|
||||
|
||||
// GetBackfill gets the Backfill with the specified id from state storage. This method fails if the Backfill does not exist. Returns the Backfill and associated ticketIDs if they exist.
|
||||
func (rb *redisBackend) GetBackfill(ctx context.Context, id string) (*pb.Backfill, []string, error) {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "GetBackfill, id: %s, failed to connect to redis: %v", id, err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
value, err := redis.Bytes(redisConn.Do("GET", id))
|
||||
if err != nil {
|
||||
// Return NotFound if redigo did not find the backfill in storage.
|
||||
if err == redis.ErrNil {
|
||||
return nil, nil, status.Errorf(codes.NotFound, "Backfill id: %s not found", id)
|
||||
}
|
||||
|
||||
err = errors.Wrapf(err, "failed to get the backfill from state storage, id: %s", id)
|
||||
return nil, nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
return nil, nil, status.Errorf(codes.NotFound, "Backfill id: %s not found", id)
|
||||
}
|
||||
|
||||
bi := &ipb.BackfillInternal{}
|
||||
err = proto.Unmarshal(value, bi)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to unmarshal internal backfill, id: %s", id)
|
||||
return nil, nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
return bi.Backfill, bi.TicketIds, nil
|
||||
}
|
||||
|
||||
// GetBackfills returns multiple backfills from storage
|
||||
func (rb *redisBackend) GetBackfills(ctx context.Context, ids []string) ([]*pb.Backfill, error) {
|
||||
if len(ids) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Unavailable, "GetBackfills, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
queryParams := make([]interface{}, len(ids))
|
||||
for i, id := range ids {
|
||||
queryParams[i] = id
|
||||
}
|
||||
|
||||
slices, err := redis.ByteSlices(redisConn.Do("MGET", queryParams...))
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to lookup backfills: %v", ids)
|
||||
return nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
m := make(map[string]*pb.Backfill, len(ids))
|
||||
for i, s := range slices {
|
||||
if s != nil {
|
||||
b := &ipb.BackfillInternal{}
|
||||
err = proto.Unmarshal(s, b)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to unmarshal backfill from redis, key: %s", ids[i])
|
||||
return nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
if b.Backfill != nil {
|
||||
m[b.Backfill.Id] = b.Backfill
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var notFound []string
|
||||
result := make([]*pb.Backfill, 0, len(ids))
|
||||
for _, id := range ids {
|
||||
if b, ok := m[id]; ok {
|
||||
result = append(result, b)
|
||||
} else {
|
||||
notFound = append(notFound, id)
|
||||
}
|
||||
}
|
||||
|
||||
if len(notFound) > 0 {
|
||||
redisLogger.Warningf("failed to lookup backfills: %v", notFound)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// DeleteBackfill removes the Backfill with the specified id from state storage.
|
||||
func (rb *redisBackend) DeleteBackfill(ctx context.Context, id string) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "DeleteBackfill, id: %s, failed to connect to redis: %v", id, err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
value, err := redis.Int(redisConn.Do("DEL", id))
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to delete the backfill from state storage, id: %s", id)
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
if value == 0 {
|
||||
return status.Errorf(codes.NotFound, "Backfill id: %s not found", id)
|
||||
}
|
||||
|
||||
return rb.deleteExpiredBackfillID(redisConn, id)
|
||||
}
|
||||
|
||||
// UpdateBackfill updates an existing Backfill with a new data. ticketIDs can be nil.
|
||||
func (rb *redisBackend) UpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "UpdateBackfill, id: %s, failed to connect to redis: %v", backfill.GetId(), err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
expired, err := isBackfillExpired(redisConn, backfill.Id, getBackfillReleaseTimeout(rb.cfg))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if expired {
|
||||
return status.Errorf(codes.Unavailable, "can not update an expired backfill, id: %s", backfill.Id)
|
||||
}
|
||||
|
||||
bf := ipb.BackfillInternal{
|
||||
Backfill: backfill,
|
||||
TicketIds: ticketIDs,
|
||||
}
|
||||
|
||||
value, err := proto.Marshal(&bf)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to marshal the backfill proto, id: %s", backfill.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
_, err = redisConn.Do("SET", backfill.GetId(), value)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to set the value for backfill, id: %s", backfill.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isBackfillExpired(conn redis.Conn, id string, ttl time.Duration) (bool, error) {
|
||||
lastAckTime, err := redis.Float64(conn.Do("ZSCORE", backfillLastAckTime, id))
|
||||
if err != nil {
|
||||
return false, status.Errorf(codes.Internal, "%v",
|
||||
errors.Wrapf(err, "failed to get backfill's last acknowledgement time, id: %s", id))
|
||||
}
|
||||
|
||||
endTime := time.Now().Add(-ttl).UnixNano()
|
||||
return int64(lastAckTime) < endTime, nil
|
||||
}
|
||||
|
||||
// DeleteBackfillCompletely performs a set of operations to remove backfill and all related entities.
func (rb *redisBackend) DeleteBackfillCompletely(ctx context.Context, id string) error {
	// Serialize concurrent work on the same backfill with a per-ID mutex.
	m := rb.NewMutex(id)
	err := m.Lock(ctx)
	if err != nil {
		return err
	}

	defer func() {
		if _, err = m.Unlock(ctx); err != nil {
			logger.WithError(err).Error("error on mutex unlock")
		}
	}()

	// 1. deindex backfill
	err = rb.DeindexBackfill(ctx, id)
	if err != nil {
		return err
	}

	// From here on, just log errors and try to perform as many of the
	// remaining cleanup actions as possible (best effort).

	// 2. get the ticket IDs associated with the current backfill
	_, associatedTickets, err := rb.GetBackfill(ctx, id)
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error":       err.Error(),
			"backfill_id": id,
		}).Error("DeleteBackfillCompletely - failed to GetBackfill")
	}

	// 3. delete associated tickets from pending release state
	// (if step 2 failed, associatedTickets is nil and this is a no-op/err)
	err = rb.DeleteTicketsFromPendingRelease(ctx, associatedTickets)
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error":       err.Error(),
			"backfill_id": id,
		}).Error("DeleteBackfillCompletely - failed to DeleteTicketsFromPendingRelease")
	}

	// 4. delete backfill
	err = rb.DeleteBackfill(ctx, id)
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error":       err.Error(),
			"backfill_id": id,
		}).Error("DeleteBackfillCompletely - failed to DeleteBackfill")
	}

	// Errors in steps 2-4 are deliberately swallowed; only lock/deindex
	// failures propagate to the caller.
	return nil
}
|
||||
|
||||
func (rb *redisBackend) cleanupWorker(ctx context.Context, backfillIDsCh <-chan string, wg *sync.WaitGroup) {
|
||||
var err error
|
||||
for id := range backfillIDsCh {
|
||||
err = rb.DeleteBackfillCompletely(ctx, id)
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
"backfill_id": id,
|
||||
}).Error("CleanupBackfills")
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
}
|
||||
|
||||
// CleanupBackfills removes expired backfills
|
||||
func (rb *redisBackend) CleanupBackfills(ctx context.Context) error {
|
||||
expiredBfIDs, err := rb.GetExpiredBackfillIDs(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(expiredBfIDs))
|
||||
backfillIDsCh := make(chan string, len(expiredBfIDs))
|
||||
|
||||
for w := 1; w <= 3; w++ {
|
||||
go rb.cleanupWorker(ctx, backfillIDsCh, &wg)
|
||||
}
|
||||
|
||||
for _, id := range expiredBfIDs {
|
||||
backfillIDsCh <- id
|
||||
}
|
||||
close(backfillIDsCh)
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateAcknowledgmentTimestamp stores Backfill's last acknowledgement time.
|
||||
// Check on Backfill existence should be performed on Frontend side
|
||||
func (rb *redisBackend) UpdateAcknowledgmentTimestamp(ctx context.Context, id string) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "UpdateAcknowledgmentTimestamp, id: %s, failed to connect to redis: %v", id, err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
expired, err := isBackfillExpired(redisConn, id, getBackfillReleaseTimeout(rb.cfg))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if expired {
|
||||
return status.Errorf(codes.Unavailable, "can not acknowledge an expired backfill, id: %s", id)
|
||||
}
|
||||
|
||||
return doUpdateAcknowledgmentTimestamp(redisConn, id)
|
||||
}
|
||||
|
||||
func doUpdateAcknowledgmentTimestamp(conn redis.Conn, backfillID string) error {
|
||||
currentTime := time.Now().UnixNano()
|
||||
|
||||
_, err := conn.Do("ZADD", backfillLastAckTime, currentTime, backfillID)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "%v",
|
||||
errors.Wrap(err, "failed to store backfill's last acknowledgement time"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetExpiredBackfillIDs gets all backfill IDs which are expired
func (rb *redisBackend) GetExpiredBackfillIDs(ctx context.Context) ([]string, error) {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "GetExpiredBackfillIDs, failed to connect to redis: %v", err)
	}
	defer handleConnectionClose(&redisConn)

	ttl := getBackfillReleaseTimeout(rb.cfg)
	curTime := time.Now()
	endTimeInt := curTime.Add(-ttl).UnixNano()
	startTimeInt := 0

	// Select backfill IDs whose last-acknowledgement score (Unix nanoseconds,
	// not ms) falls in [0, now-ttl], i.e. backfills not acknowledged within
	// the TTL window.
	expiredBackfillIds, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", backfillLastAckTime, startTimeInt, endTimeInt))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "error getting expired backfills %v", err)
	}

	return expiredBackfillIds, nil
}
|
||||
|
||||
// deleteExpiredBackfillID deletes expired BackfillID from a sorted set
|
||||
func (rb *redisBackend) deleteExpiredBackfillID(conn redis.Conn, backfillID string) error {
|
||||
|
||||
_, err := conn.Do("ZREM", backfillLastAckTime, backfillID)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "failed to delete expired backfill ID %s from Sorted Set %s",
|
||||
backfillID, err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IndexBackfill adds the backfill to the index.
|
||||
func (rb *redisBackend) IndexBackfill(ctx context.Context, backfill *pb.Backfill) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "IndexBackfill, id: %s, failed to connect to redis: %v", backfill.GetId(), err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
err = redisConn.Send("HSET", allBackfills, backfill.Id, backfill.Generation)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to add backfill to all backfills, id: %s", backfill.Id)
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeindexBackfill removes specified Backfill ID from the index. The Backfill continues to exist.
|
||||
func (rb *redisBackend) DeindexBackfill(ctx context.Context, id string) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "DeindexBackfill, id: %s, failed to connect to redis: %v", id, err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
err = redisConn.Send("HDEL", allBackfills, id)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to remove ID from backfill index, id: %s", id)
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetIndexedBackfills returns the ids of all backfills currently indexed.
|
||||
func (rb *redisBackend) GetIndexedBackfills(ctx context.Context) (map[string]int, error) {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Unavailable, "GetIndexedBackfills, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
ttl := getBackfillReleaseTimeout(rb.cfg)
|
||||
curTime := time.Now()
|
||||
endTimeInt := curTime.Add(time.Hour).UnixNano()
|
||||
startTimeInt := curTime.Add(-ttl).UnixNano()
|
||||
|
||||
// Exclude expired backfills
|
||||
acknowledgedIds, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", backfillLastAckTime, startTimeInt, endTimeInt))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "error getting acknowledged backfills %v", err)
|
||||
}
|
||||
|
||||
index, err := redis.StringMap(redisConn.Do("HGETALL", allBackfills))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "error getting all indexed backfill ids %v", err)
|
||||
}
|
||||
|
||||
r := make(map[string]int, len(acknowledgedIds))
|
||||
for _, id := range acknowledgedIds {
|
||||
if generation, ok := index[id]; ok {
|
||||
gen, err := strconv.Atoi(generation)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "error while parsing generation into number: %v", err)
|
||||
}
|
||||
r[id] = gen
|
||||
}
|
||||
}
|
||||
|
||||
return r, nil
|
||||
|
||||
}
|
||||
|
||||
func getBackfillReleaseTimeout(cfg config.View) time.Duration {
|
||||
// Use a fraction 80% of pendingRelease Tickets TTL
|
||||
ttl := cfg.GetDuration("pendingReleaseTimeout") / 5 * 4
|
||||
return ttl
|
||||
}
|
813
internal/statestore/backfill_test.go
Normal file
813
internal/statestore/backfill_test.go
Normal file
@ -0,0 +1,813 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package statestore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
utilTesting "open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
// TestCreateBackfillLastAckTime verifies that CreateBackfill records a
// positive last-acknowledgement timestamp for the new backfill in the
// backfillLastAckTime sorted set.
func TestCreateBackfillLastAckTime(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	bfID := "1234"
	ctx := utilTesting.NewContext(t)
	err := service.CreateBackfill(ctx, &pb.Backfill{
		Id: bfID,
	}, nil)
	require.NoError(t, err)

	pool := GetRedisPool(cfg)
	conn := pool.Get()

	// test that Backfill last acknowledged is in a sorted set
	ts, redisErr := redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, bfID))
	require.NoError(t, redisErr)
	require.True(t, ts > 0, "timestamp is not valid")
}
|
||||
|
||||
// TestCreateBackfill covers creating a new backfill, rejecting a duplicate
// create with codes.AlreadyExists, and failing with codes.Unavailable when
// the context is already cancelled.
func TestCreateBackfill(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	bf := pb.Backfill{
		Id:         "1",
		Generation: 1,
	}

	var testCases = []struct {
		description     string
		backfill        *pb.Backfill
		ticketIDs       []string
		expectedCode    codes.Code
		expectedMessage string
	}{
		{
			description:     "ok, backfill is passed, ticketIDs is nil",
			backfill:        &bf,
			ticketIDs:       []string{"1", "2"},
			expectedCode:    codes.OK,
			expectedMessage: "",
		},
		{
			// second create of the same ID must fail
			description:     "create existing backfill, err expected",
			backfill:        &bf,
			ticketIDs:       nil,
			expectedCode:    codes.AlreadyExists,
			expectedMessage: "backfill already exists, id: 1",
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.description, func(t *testing.T) {
			err := service.CreateBackfill(ctx, tc.backfill, tc.ticketIDs)
			if tc.expectedCode == codes.OK {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
				require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
			}
		})
	}

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err := service.CreateBackfill(ctx, &pb.Backfill{
		Id: "222",
	}, nil)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "CreateBackfill, id: 222, failed to connect to redis:")
}
|
||||
|
||||
// TestUpdateExistingBackfillNoError checks that UpdateBackfill overwrites an
// existing backfill's generation, search fields, extensions and ticket IDs,
// and that the new state is readable back via GetBackfill.
func TestUpdateExistingBackfillNoError(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	// ARRANGE
	v := &wrappers.DoubleValue{Value: 123}
	a, err := ptypes.MarshalAny(v)
	require.NoError(t, err)

	existingBF := pb.Backfill{
		Id:         "123",
		Generation: 1,
		SearchFields: &pb.SearchFields{
			Tags: []string{"123"},
		},
		Extensions: map[string]*any.Any{
			"qwe": a,
		},
	}
	ticketIDs := []string{"1"}
	err = service.CreateBackfill(ctx, &existingBF, ticketIDs)
	require.NoError(t, err)

	updateBF := pb.Backfill{
		Id:         existingBF.Id,
		Generation: 5,
		SearchFields: &pb.SearchFields{
			Tags: []string{"456"},
		},
		Extensions: map[string]*any.Any{
			"xyz": a,
		},
	}
	updateTicketIDs := []string{"1"}

	// ACT
	err = service.UpdateBackfill(ctx, &updateBF, updateTicketIDs)
	require.NoError(t, err)

	// ASSERT
	backfillActual, tIDsActual, err := service.GetBackfill(ctx, updateBF.Id)
	require.NoError(t, err)

	require.Equal(t, updateTicketIDs, tIDsActual)
	require.Equal(t, updateBF.Id, backfillActual.Id)
	require.Equal(t, updateBF.Generation, backfillActual.Generation)

	require.NotNil(t, backfillActual.SearchFields)
	require.Equal(t, updateBF.SearchFields.Tags, backfillActual.SearchFields.Tags)

	// the extension value round-trips through the update
	res := &wrappers.DoubleValue{}
	err = ptypes.UnmarshalAny(backfillActual.Extensions["xyz"], res)
	require.NoError(t, err)
	require.Equal(t, v.Value, res.Value)
}
|
||||
|
||||
// TestUpdateBackfillDoNotExistCanNotUpdate checks that updating a backfill
// with no recorded acknowledgement time fails with codes.Internal (the
// ZSCORE lookup in isBackfillExpired returns nil for an unknown ID).
func TestUpdateBackfillDoNotExistCanNotUpdate(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	v := &wrappers.DoubleValue{Value: 123}
	a, err := ptypes.MarshalAny(v)
	require.NoError(t, err)

	updateBF := pb.Backfill{
		Id:         "123",
		Generation: 5,
		SearchFields: &pb.SearchFields{
			Tags: []string{"456"},
		},
		Extensions: map[string]*any.Any{
			"xyz": a,
		},
	}
	updateTicketIDs := []string{"1"}

	err = service.UpdateBackfill(ctx, &updateBF, updateTicketIDs)
	require.Error(t, err)
	require.Equal(t, codes.Internal.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "failed to get backfill's last acknowledgement time, id: 123")
}
|
||||
|
||||
func TestUpdateBackfillExpiredBackfillErrExpected(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
|
||||
bfID := "bf1"
|
||||
bfLastAck := "backfill_last_ack_time"
|
||||
bf := pb.Backfill{
|
||||
Id: bfID,
|
||||
Generation: 5,
|
||||
}
|
||||
|
||||
// add expired but acknowledged backfill
|
||||
_, err = rc.Do("ZADD", bfLastAck, 123, bfID)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.UpdateBackfill(ctx, &bf, nil)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not update an expired backfill, id: %s", bfID))
|
||||
}
|
||||
|
||||
// TestUpdateBackfillExpiredContextErrExpected checks that UpdateBackfill
// fails with codes.Unavailable when called with an already-cancelled context.
func TestUpdateBackfillExpiredContextErrExpected(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()

	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err := service.UpdateBackfill(ctx, &pb.Backfill{
		Id: "222",
	}, nil)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "UpdateBackfill, id: 222, failed to connect to redis:")
}
|
||||
|
||||
// TestGetBackfill covers the GetBackfill read paths: an existing backfill,
// an empty/unknown ID (codes.NotFound), a key holding a non-backfill value
// (codes.Internal on unmarshal), and a cancelled context (codes.Unavailable).
func TestGetBackfill(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	expectedBackfill := &pb.Backfill{
		Id:         "mockBackfillID",
		Generation: 1,
	}
	expectedTicketIDs := []string{"1", "2"}
	err := service.CreateBackfill(ctx, expectedBackfill, expectedTicketIDs)
	require.NoError(t, err)

	// Plant a key whose value is not a serialized backfill proto.
	c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
	require.NoError(t, err)
	_, err = c.Do("SET", "wrong-type-key", "wrong-type-value")
	require.NoError(t, err)

	var testCases = []struct {
		description     string
		backfillID      string
		expectedCode    codes.Code
		expectedMessage string
	}{
		{
			description:     "backfill is found",
			backfillID:      "mockBackfillID",
			expectedCode:    codes.OK,
			expectedMessage: "",
		},
		{
			description:     "empty id passed, err expected",
			backfillID:      "",
			expectedCode:    codes.NotFound,
			expectedMessage: "Backfill id:  not found",
		},
		{
			description:     "wrong id passed, err expected",
			backfillID:      "123456",
			expectedCode:    codes.NotFound,
			expectedMessage: "Backfill id: 123456 not found",
		},
		{
			description:     "item of a wrong type is requested, err expected",
			backfillID:      "wrong-type-key",
			expectedCode:    codes.Internal,
			expectedMessage: "failed to unmarshal internal backfill, id: wrong-type-key:",
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.description, func(t *testing.T) {
			backfillActual, tidsActual, errActual := service.GetBackfill(ctx, tc.backfillID)
			if tc.expectedCode == codes.OK {
				require.NoError(t, errActual)
				require.NotNil(t, backfillActual)
				require.Equal(t, expectedBackfill.Id, backfillActual.Id)
				require.Equal(t, expectedBackfill.SearchFields, backfillActual.SearchFields)
				require.Equal(t, expectedBackfill.Extensions, backfillActual.Extensions)
				require.Equal(t, expectedBackfill.Generation, backfillActual.Generation)
				require.Equal(t, expectedTicketIDs, tidsActual)
			} else {
				require.Nil(t, backfillActual)
				require.Nil(t, tidsActual)
				require.Error(t, errActual)
				require.Equal(t, tc.expectedCode.String(), status.Convert(errActual).Code().String())
				require.Contains(t, status.Convert(errActual).Message(), tc.expectedMessage)
			}
		})
	}

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	bf, tids, err := service.GetBackfill(ctx, "12345")
	require.Error(t, err)
	require.Nil(t, bf)
	require.Nil(t, tids)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "GetBackfill, id: 12345, failed to connect to redis:")
}
|
||||
|
||||
// TestDeleteBackfill covers deleting an existing backfill (including removal
// of its last-acknowledgement entry from the sorted set), codes.NotFound for
// empty/unknown IDs, and codes.Unavailable on a cancelled context.
func TestDeleteBackfill(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	//Last Acknowledge timestamp is updated on Frontend CreateBackfill
	bfID := "mockBackfillID"
	err := service.CreateBackfill(ctx, &pb.Backfill{
		Id:         bfID,
		Generation: 1,
	}, nil)
	require.NoError(t, err)

	pool := GetRedisPool(cfg)
	conn := pool.Get()

	// precondition: the backfill has a positive last-ack timestamp
	ts, err := redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, bfID))
	require.NoError(t, err)
	require.True(t, ts > 0, "timestamp is not valid")

	var testCases = []struct {
		description     string
		backfillID      string
		expectedCode    codes.Code
		expectedMessage string
	}{
		{
			description:     "backfill is found and deleted",
			backfillID:      bfID,
			expectedCode:    codes.OK,
			expectedMessage: "",
		},
		{
			description:     "empty id passed, err expected",
			backfillID:      "",
			expectedCode:    codes.NotFound,
			expectedMessage: "Backfill id:  not found",
		},
		{
			description:     "wrong id passed, err expected",
			backfillID:      "123456",
			expectedCode:    codes.NotFound,
			expectedMessage: "Backfill id: 123456 not found",
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.description, func(t *testing.T) {
			errActual := service.DeleteBackfill(ctx, tc.backfillID)
			if tc.expectedCode == codes.OK {
				require.NoError(t, errActual)

				_, errGetTicket := service.GetTicket(ctx, tc.backfillID)
				require.Error(t, errGetTicket)
				require.Equal(t, codes.NotFound.String(), status.Convert(errGetTicket).Code().String())
				// test that Backfill also deleted from last acknowledged sorted set
				_, err = redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, tc.backfillID))
				require.Error(t, err)
				require.Equal(t, err.Error(), "redigo: nil returned")
			} else {
				require.Error(t, errActual)
				require.Equal(t, tc.expectedCode.String(), status.Convert(errActual).Code().String())
				require.Contains(t, status.Convert(errActual).Message(), tc.expectedMessage)
			}
		})
	}

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err = service.DeleteBackfill(ctx, "12345")
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "DeleteBackfill, id: 12345, failed to connect to redis:")

}
|
||||
|
||||
// TestUpdateAcknowledgmentTimestampLifecycle test statestore functions - UpdateAcknowledgmentTimestamp, GetExpiredBackfillIDs
// and deleteExpiredBackfillID
func TestUpdateAcknowledgmentTimestampLifecycle(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()

	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	bf1 := "mockBackfillID"
	bf2 := "mockBackfillID2"
	err := service.CreateBackfill(ctx, &pb.Backfill{
		Id:         bf1,
		Generation: 1,
	}, nil)
	require.NoError(t, err)

	err = service.CreateBackfill(ctx, &pb.Backfill{
		Id:         bf2,
		Generation: 1,
	}, nil)
	require.NoError(t, err)

	// freshly created backfills are not expired yet
	bfIDs, err := service.GetExpiredBackfillIDs(ctx)
	require.NoError(t, err)
	require.Len(t, bfIDs, 0)
	pendingReleaseTimeout := cfg.GetDuration("pendingReleaseTimeout")

	// Sleep till all Backfills expire
	time.Sleep(pendingReleaseTimeout)

	// This call also sets initial LastAcknowledge time
	bfIDs, err = service.GetExpiredBackfillIDs(ctx)
	require.NoError(t, err)
	require.Len(t, bfIDs, 2)
	require.Contains(t, bfIDs, bf1)
	require.Contains(t, bfIDs, bf2)

	// acknowledging an expired backfill must be rejected
	err = service.UpdateAcknowledgmentTimestamp(ctx, bf1)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not acknowledge an expired backfill, id: %s", bf1))

	err = service.UpdateAcknowledgmentTimestamp(ctx, bf2)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not acknowledge an expired backfill, id: %s", bf2))

	// deleting one backfill removes it from the expired set as well
	err = service.DeleteBackfill(ctx, bfIDs[0])
	require.NoError(t, err)

	bfIDs, err = service.GetExpiredBackfillIDs(ctx)
	require.Len(t, bfIDs, 1)
	require.NoError(t, err)
}
|
||||
|
||||
// TestUpdateAcknowledgmentTimestamp verifies that acknowledging a live
// backfill stores a fresh last-acknowledgement timestamp in Redis.
func TestUpdateAcknowledgmentTimestamp(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()

	startTime := time.Now()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)
	bf1 := "mockBackfillID"

	err := service.CreateBackfill(ctx, &pb.Backfill{
		Id:         bf1,
		Generation: 1,
	}, nil)
	require.NoError(t, err)

	err = service.UpdateAcknowledgmentTimestamp(ctx, bf1)
	require.NoError(t, err)

	// Check that Acknowledge timestamp stored valid in Redis
	pool := GetRedisPool(cfg)
	conn := pool.Get()
	res, err := redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, bf1))
	require.NoError(t, err)
	// Reconstruct a time.Time from the stored Unix nanoseconds and make sure
	// it is more recent than the test's start time
	t2 := time.Unix(res/1e9, res%1e9)
	require.True(t, t2.After(startTime), "UpdateAcknowledgmentTimestamp should update time to a more recent one")
}
|
||||
|
||||
func TestUpdateAcknowledgmentTimestamptExpiredBackfillErrExpected(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
|
||||
bfID := "bf1"
|
||||
bfLastAck := "backfill_last_ack_time"
|
||||
|
||||
// add expired but acknowledged backfill
|
||||
_, err = rc.Do("ZADD", bfLastAck, 123, bfID)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.UpdateAcknowledgmentTimestamp(ctx, bfID)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not acknowledge an expired backfill, id: %s", bfID))
|
||||
}
|
||||
|
||||
// TestUpdateAcknowledgmentTimestampConnectionError checks that an
// acknowledgement against an unreachable Redis endpoint returns a
// connection error.
func TestUpdateAcknowledgmentTimestampConnectionError(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	bf1 := "mockBackfill"
	ctx := utilTesting.NewContext(t)
	// swap in a config pointing at a port with no Redis server
	cfg = createInvalidRedisConfig()
	service = New(cfg)
	require.NotNil(t, service)
	err := service.UpdateAcknowledgmentTimestamp(ctx, bf1)
	require.Error(t, err, "failed to connect to redis:")
}
|
||||
|
||||
func createInvalidRedisConfig() config.View {
|
||||
cfg := viper.New()
|
||||
|
||||
cfg.Set("redis.hostname", "localhost")
|
||||
cfg.Set("redis.port", 222)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// TestGetExpiredBackfillIDs test statestore function GetExpiredBackfillIDs
func TestGetExpiredBackfillIDs(t *testing.T) {
	// Prepare expired and normal BackfillIds in a Redis Sorted Set
	cfg, closer := createRedis(t, false, "")
	defer closer()

	expID := "expired"
	goodID := "fresh"
	pool := GetRedisPool(cfg)
	conn := pool.Get()
	// score 123 ns is far in the past -> expired; time.Now() -> fresh
	_, err := conn.Do("ZADD", backfillLastAckTime, 123, expID)
	require.NoError(t, err)
	_, err = conn.Do("ZADD", backfillLastAckTime, time.Now().UnixNano(), goodID)
	require.NoError(t, err)

	// GetExpiredBackfillIDs should return only expired BF
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)
	bfIDs, err := service.GetExpiredBackfillIDs(ctx)
	require.NoError(t, err)
	require.Len(t, bfIDs, 1)
	require.Equal(t, expID, bfIDs[0])
}
|
||||
|
||||
func TestIndexBackfill(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
t.Run("WithValidContext", func(t *testing.T) {
|
||||
ctx := utilTesting.NewContext(t)
|
||||
generateBackfills(ctx, t, service, 2)
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err := redis.Strings(c.Do("HKEYS", allBackfills))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 2)
|
||||
require.Equal(t, "mockBackfillID-0", idsIndexed[0])
|
||||
require.Equal(t, "mockBackfillID-1", idsIndexed[1])
|
||||
})
|
||||
|
||||
t.Run("WithCancelledContext", func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err := service.IndexBackfill(ctx, &pb.Backfill{
|
||||
Id: "12345",
|
||||
Generation: 42,
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "IndexBackfill, id: 12345, failed to connect to redis:")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeindexBackfill(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
generateBackfills(ctx, t, service, 2)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err := redis.Strings(c.Do("HKEYS", allBackfills))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 2)
|
||||
require.Equal(t, "mockBackfillID-0", idsIndexed[0])
|
||||
require.Equal(t, "mockBackfillID-1", idsIndexed[1])
|
||||
|
||||
// deindex and check that there is only 1 backfill in the returned slice
|
||||
err = service.DeindexBackfill(ctx, "mockBackfillID-1")
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err = redis.Strings(c.Do("HKEYS", allBackfills))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 1)
|
||||
require.Equal(t, "mockBackfillID-0", idsIndexed[0])
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.DeindexBackfill(ctx, "12345")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "DeindexBackfill, id: 12345, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestGetIndexedBackfills(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
verifyBackfills := func(service Service, backfills []*pb.Backfill) {
|
||||
ids, err := service.GetIndexedBackfills(ctx)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, len(backfills), len(ids))
|
||||
|
||||
for _, bf := range backfills {
|
||||
gen, ok := ids[bf.GetId()]
|
||||
require.Equal(t, bf.Generation, int64(gen))
|
||||
require.True(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// no indexed backfills exists
|
||||
verifyBackfills(service, []*pb.Backfill{})
|
||||
|
||||
// two indexed backfills exists
|
||||
backfills := generateBackfills(ctx, t, service, 2)
|
||||
verifyBackfills(service, backfills)
|
||||
|
||||
// deindex one backfill, one backfill exist
|
||||
err := service.DeindexBackfill(ctx, backfills[0].Id)
|
||||
require.Nil(t, err)
|
||||
verifyBackfills(service, backfills[1:2])
|
||||
}
|
||||
|
||||
func generateBackfills(ctx context.Context, t *testing.T, service Service, amount int) []*pb.Backfill {
|
||||
backfills := make([]*pb.Backfill, 0, amount)
|
||||
|
||||
for i := 0; i < amount; i++ {
|
||||
tmp := &pb.Backfill{
|
||||
Id: fmt.Sprintf("mockBackfillID-%d", i),
|
||||
Generation: 1,
|
||||
}
|
||||
require.NoError(t, service.CreateBackfill(ctx, tmp, []string{}))
|
||||
require.NoError(t, service.IndexBackfill(ctx, tmp))
|
||||
backfills = append(backfills, tmp)
|
||||
}
|
||||
|
||||
return backfills
|
||||
}
|
||||
|
||||
func BenchmarkCleanupBackfills(b *testing.B) {
|
||||
t := &testing.T{}
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
|
||||
createStaleBF := func(bfID string, ticketIDs ...string) {
|
||||
bf := &pb.Backfill{
|
||||
Id: bfID,
|
||||
Generation: 1,
|
||||
}
|
||||
err = service.CreateBackfill(ctx, bf, ticketIDs)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = rc.Do("ZADD", "backfill_last_ack_time", 123, bfID)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.AddTicketsToPendingRelease(ctx, ticketIDs)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.IndexBackfill(ctx, bf)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
for i := 0; i < 50; i++ {
|
||||
createStaleBF(fmt.Sprintf("b-%d", i), fmt.Sprintf("t1-%d", i), fmt.Sprintf("t1-%d", i+1))
|
||||
}
|
||||
err = service.CleanupBackfills(ctx)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanupBackfills(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
|
||||
bfID := "mockBackfill-1"
|
||||
ticketIDs := []string{"t1", "t2"}
|
||||
bfLastAck := "backfill_last_ack_time"
|
||||
proposedTicketIDs := "proposed_ticket_ids"
|
||||
allBackfills := "allBackfills"
|
||||
generation := int64(55)
|
||||
bf := &pb.Backfill{
|
||||
Id: bfID,
|
||||
Generation: generation,
|
||||
}
|
||||
|
||||
// ARRANGE
|
||||
err = service.CreateBackfill(ctx, bf, ticketIDs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// add expired but acknowledged backfill
|
||||
_, err = rc.Do("ZADD", bfLastAck, 123, bfID)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.AddTicketsToPendingRelease(ctx, ticketIDs)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.IndexBackfill(ctx, bf)
|
||||
require.NoError(t, err)
|
||||
|
||||
// backfill is properly indexed
|
||||
index, err := redis.StringMap(rc.Do("HGETALL", allBackfills))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, index, 1)
|
||||
require.Equal(t, strconv.Itoa(int(generation)), index[bfID])
|
||||
|
||||
// ACT
|
||||
err = service.CleanupBackfills(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// ASSERT
|
||||
// backfill must be deindexed
|
||||
index, err = redis.StringMap(rc.Do("HGETALL", allBackfills))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, index, 0)
|
||||
|
||||
// backfill doesn't exist anymore
|
||||
_, _, err = service.GetBackfill(ctx, bfID)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, "Backfill id: mockBackfill-1 not found", status.Convert(err).Message())
|
||||
|
||||
// no records in backfill sorted set left
|
||||
expiredBackfillIds, err := redis.Strings(rc.Do("ZRANGEBYSCORE", bfLastAck, 0, 200))
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, expiredBackfillIds)
|
||||
|
||||
// no records in tickets sorted set left
|
||||
pendingTickets, err := redis.Strings(rc.Do("ZRANGEBYSCORE", proposedTicketIDs, 0, time.Now().UnixNano()))
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, pendingTickets)
|
||||
}
|
@ -106,3 +106,94 @@ func (is *instrumentedService) ReleaseAllTickets(ctx context.Context) error {
|
||||
defer span.End()
|
||||
return is.s.ReleaseAllTickets(ctx)
|
||||
}
|
||||
|
||||
// CreateBackfill creates a new Backfill in the state storage if one doesn't
// exist. The xids algorithm used to create the ids ensures that they are
// unique with no system wide synchronization. Calling clients are forbidden
// from choosing an id during create, so no conflicts will occur.
// Tracing wrapper: opens a span, then delegates to the wrapped Service.
func (is *instrumentedService) CreateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.CreateBackfill")
	defer span.End()
	return is.s.CreateBackfill(ctx, backfill, ticketIDs)
}
|
||||
|
||||
// GetBackfill gets the Backfill with the specified id from state storage.
// This method fails if the Backfill does not exist. Returns the Backfill
// and associated ticketIDs if they exist.
// Tracing wrapper: opens a span, then delegates to the wrapped Service.
func (is *instrumentedService) GetBackfill(ctx context.Context, id string) (*pb.Backfill, []string, error) {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetBackfill")
	defer span.End()
	return is.s.GetBackfill(ctx, id)
}
|
||||
|
||||
// GetBackfills returns multiple backfills from storage.
// Tracing wrapper: opens a span, then delegates to the wrapped Service.
func (is *instrumentedService) GetBackfills(ctx context.Context, ids []string) ([]*pb.Backfill, error) {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetBackfills")
	defer span.End()
	return is.s.GetBackfills(ctx, ids)
}
|
||||
|
||||
// DeleteBackfill removes the Backfill with the specified id from state
// storage. This method succeeds if the Backfill does not exist.
// Tracing wrapper: opens a span, then delegates to the wrapped Service.
func (is *instrumentedService) DeleteBackfill(ctx context.Context, id string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeleteBackfill")
	defer span.End()
	return is.s.DeleteBackfill(ctx, id)
}
|
||||
|
||||
// UpdateBackfill updates an existing Backfill with new data. ticketIDs can
// be nil.
// Tracing wrapper: opens a span, then delegates to the wrapped Service.
func (is *instrumentedService) UpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.UpdateBackfill")
	defer span.End()
	return is.s.UpdateBackfill(ctx, backfill, ticketIDs)
}
|
||||
|
||||
// NewMutex returns a new distributed mutex with given name.
// The span is rooted at context.Background() because this method takes no
// context parameter.
func (is *instrumentedService) NewMutex(key string) RedisLocker {
	_, span := trace.StartSpan(context.Background(), "statestore/instrumented.NewMutex")
	defer span.End()
	return is.s.NewMutex(key)
}
|
||||
|
||||
// UpdateAcknowledgmentTimestamp stores Backfill's last acknowledged time.
// Tracing wrapper: opens a span, then delegates to the wrapped Service.
func (is *instrumentedService) UpdateAcknowledgmentTimestamp(ctx context.Context, id string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.UpdateAcknowledgmentTimestamp")
	defer span.End()
	return is.s.UpdateAcknowledgmentTimestamp(ctx, id)
}
|
||||
|
||||
// GetExpiredBackfillIDs gets all backfills which are expired.
// Tracing wrapper: opens a span, then delegates to the wrapped Service.
func (is *instrumentedService) GetExpiredBackfillIDs(ctx context.Context) ([]string, error) {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetExpiredBackfillIDs")
	defer span.End()
	return is.s.GetExpiredBackfillIDs(ctx)
}
|
||||
|
||||
// IndexBackfill adds the backfill to the index.
|
||||
func (is *instrumentedService) IndexBackfill(ctx context.Context, backfill *pb.Backfill) error {
|
||||
_, span := trace.StartSpan(ctx, "statestore/instrumented.IndexBackfill")
|
||||
defer span.End()
|
||||
return is.s.IndexBackfill(ctx, backfill)
|
||||
}
|
||||
|
||||
// DeindexBackfill removes specified Backfill ID from the index. The Backfill continues to exist.
|
||||
func (is *instrumentedService) DeindexBackfill(ctx context.Context, id string) error {
|
||||
_, span := trace.StartSpan(ctx, "statestore/instrumented.DeindexBackfill")
|
||||
defer span.End()
|
||||
return is.s.DeindexBackfill(ctx, id)
|
||||
}
|
||||
|
||||
// GetIndexedBackfills returns the ids of all backfills currently indexed.
|
||||
func (is *instrumentedService) GetIndexedBackfills(ctx context.Context) (map[string]int, error) {
|
||||
_, span := trace.StartSpan(ctx, "statestore/instrumented.GetIndexedBackfills")
|
||||
defer span.End()
|
||||
return is.s.GetIndexedBackfills(ctx)
|
||||
}
|
||||
|
||||
// CleanupBackfills removes expired backfills
|
||||
func (is *instrumentedService) CleanupBackfills(ctx context.Context) error {
|
||||
_, span := trace.StartSpan(context.Background(), "statestore/instrumented.CleanupBackfills")
|
||||
defer span.End()
|
||||
return is.s.CleanupBackfills(ctx)
|
||||
}
|
||||
|
||||
// DeleteBackfillCompletely performs a set of operations to remove backfill and all related entities.
|
||||
func (is *instrumentedService) DeleteBackfillCompletely(ctx context.Context, id string) error {
|
||||
_, span := trace.StartSpan(context.Background(), "statestore/instrumented.DeleteBackfillCompletely")
|
||||
defer span.End()
|
||||
return is.s.DeleteBackfillCompletely(ctx, id)
|
||||
}
|
||||
|
@ -27,13 +27,20 @@ type Service interface {
|
||||
// HealthCheck indicates if the database is reachable.
|
||||
HealthCheck(ctx context.Context) error
|
||||
|
||||
// Closes the connection to the underlying storage.
|
||||
Close() error
|
||||
|
||||
// Ticket
|
||||
|
||||
// CreateTicket creates a new Ticket in the state storage. If the id already exists, it will be overwritten.
|
||||
CreateTicket(ctx context.Context, ticket *pb.Ticket) error
|
||||
|
||||
// GetTicket gets the Ticket with the specified id from state storage. This method fails if the Ticket does not exist.
|
||||
// GetTicket gets the Ticket with the specified id from state storage.
|
||||
// This method fails if the Ticket does not exist.
|
||||
GetTicket(ctx context.Context, id string) (*pb.Ticket, error)
|
||||
|
||||
// DeleteTicket removes the Ticket with the specified id from state storage. This method succeeds if the Ticket does not exist.
|
||||
// DeleteTicket removes the Ticket with the specified id from state storage.
|
||||
// This method succeeds if the Ticket does not exist.
|
||||
DeleteTicket(ctx context.Context, id string) error
|
||||
|
||||
// IndexTicket adds the ticket to the index.
|
||||
@ -45,26 +52,71 @@ type Service interface {
|
||||
// GetIndexedIDSet returns the ids of all tickets currently indexed.
|
||||
GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error)
|
||||
|
||||
// GetTickets returns multiple tickets from storage. Missing tickets are
|
||||
// silently ignored.
|
||||
// GetTickets returns multiple tickets from storage.
|
||||
// Missing tickets are silently ignored.
|
||||
GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error)
|
||||
|
||||
// UpdateAssignments update using the request's specified tickets with assignments.
|
||||
UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, []*pb.Ticket, error)
|
||||
|
||||
// GetAssignments returns the assignment associated with the input ticket id
|
||||
// GetAssignments returns the assignment associated with the input ticket id.
|
||||
GetAssignments(ctx context.Context, id string, callback func(*pb.Assignment) error) error
|
||||
|
||||
// AddTicketsToPendingRelease appends new proposed tickets to the proposed sorted set with current timestamp.
|
||||
AddTicketsToPendingRelease(ctx context.Context, ids []string) error
|
||||
|
||||
// DeleteTicketsFromPendingRelease deletes tickets from the proposed sorted set
|
||||
// DeleteTicketsFromPendingRelease deletes tickets from the proposed sorted set.
|
||||
DeleteTicketsFromPendingRelease(ctx context.Context, ids []string) error
|
||||
|
||||
// ReleaseAllTickets releases all pending tickets back to active
|
||||
// ReleaseAllTickets releases all pending tickets back to active.
|
||||
ReleaseAllTickets(ctx context.Context) error
|
||||
|
||||
// Closes the connection to the underlying storage.
|
||||
Close() error
|
||||
// Backfill
|
||||
|
||||
// CreateBackfill creates a new Backfill in the state storage if one doesn't exist.
|
||||
// The xids algorithm used to create the ids ensures that they are unique with no system wide synchronization.
|
||||
// Calling clients are forbidden from choosing an id during create. So no conflicts will occur.
|
||||
CreateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error
|
||||
|
||||
// GetBackfill gets the Backfill with the specified id from state storage.
|
||||
// This method fails if the Backfill does not exist.
|
||||
// Returns the Backfill and associated ticketIDs if they exist.
|
||||
GetBackfill(ctx context.Context, id string) (*pb.Backfill, []string, error)
|
||||
|
||||
// GetBackfills returns multiple backfills from storage
|
||||
GetBackfills(ctx context.Context, ids []string) ([]*pb.Backfill, error)
|
||||
|
||||
// DeleteBackfill removes the Backfill with the specified id from state storage.
|
||||
// This method succeeds if the Backfill does not exist.
|
||||
DeleteBackfill(ctx context.Context, id string) error
|
||||
|
||||
// DeleteBackfillCompletely performs a set of operations to remove backfill and all related entities.
|
||||
DeleteBackfillCompletely(ctx context.Context, id string) error
|
||||
|
||||
// UpdateBackfill updates an existing Backfill with a new data. ticketIDs can be nil.
|
||||
UpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error
|
||||
|
||||
// NewMutex returns an interface of a new distributed mutex with given name
|
||||
NewMutex(key string) RedisLocker
|
||||
|
||||
// CleanupBackfills removes expired backfills
|
||||
CleanupBackfills(ctx context.Context) error
|
||||
|
||||
// UpdateAcknowledgmentTimestamp updates Backfill's last acknowledged time
|
||||
UpdateAcknowledgmentTimestamp(ctx context.Context, id string) error
|
||||
|
||||
// GetExpiredBackfillIDs gets all backfill IDs which are expired
|
||||
GetExpiredBackfillIDs(ctx context.Context) ([]string, error)
|
||||
|
||||
// IndexBackfill adds the backfill to the index.
|
||||
IndexBackfill(ctx context.Context, backfill *pb.Backfill) error
|
||||
|
||||
// DeindexBackfill removes specified Backfill ID from the index. The Backfill continues to exist.
|
||||
DeindexBackfill(ctx context.Context, id string) error
|
||||
|
||||
// GetIndexedBackfills returns a map containing the IDs and
|
||||
// the Generation number of the backfills currently indexed.
|
||||
GetIndexedBackfills(ctx context.Context) (map[string]int, error)
|
||||
}
|
||||
|
||||
// New creates a Service based on the configuration.
|
||||
@ -77,3 +129,9 @@ func New(cfg config.View) Service {
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// RedisLocker provides methods to use distributed locks against redis.
type RedisLocker interface {
	// Lock acquires the lock; on error the caller may retry.
	Lock(ctx context.Context) error
	// Unlock releases the lock and reports whether the release succeeded.
	Unlock(ctx context.Context) (bool, error)
}
|
||||
|
@ -20,20 +20,13 @@ import (
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/golang/protobuf/proto"
|
||||
rs "github.com/go-redsync/redsync/v4"
|
||||
rsredigo "github.com/go-redsync/redsync/v4/redis/redigo"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
const (
|
||||
allTickets = "allTickets"
|
||||
proposedTicketIDs = "proposed_ticket_ids"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -41,12 +34,32 @@ var (
|
||||
"app": "openmatch",
|
||||
"component": "statestore.redis",
|
||||
})
|
||||
|
||||
// this field is used to create new mutexes
|
||||
redsync *rs.Redsync
|
||||
)
|
||||
|
||||
// NewMutex returns a new distributed mutex with given name. The lock key is
// namespaced as "lock/<key>" and expires after the configured
// "backfillLockTimeout". The returned locker is a redisBackend value
// carrying only the mutex field.
func (rb *redisBackend) NewMutex(key string) RedisLocker {
	m := redsync.NewMutex(fmt.Sprintf("lock/%s", key), rs.WithExpiry(rb.cfg.GetDuration("backfillLockTimeout")))
	return redisBackend{mutex: m}
}
|
||||
|
||||
// Lock locks rb. In case it returns an error on failure, you may retry to
// acquire the lock by calling this method again.
func (rb redisBackend) Lock(ctx context.Context) error {
	return rb.mutex.LockContext(ctx)
}
|
||||
|
||||
// Unlock unlocks rb and returns the status of the unlock.
func (rb redisBackend) Unlock(ctx context.Context) (bool, error) {
	return rb.mutex.UnlockContext(ctx)
}
|
||||
|
||||
// redisBackend implements the statestore Service backed by Redis. A value of
// this type carrying only the mutex field is also returned by NewMutex as a
// RedisLocker.
type redisBackend struct {
	healthCheckPool *redis.Pool // dedicated pool for health checks
	redisPool       *redis.Pool // main connection pool
	cfg             config.View
	mutex           *rs.Mutex // set only on values returned by NewMutex
}
|
||||
|
||||
// Close the connection to the database.
|
||||
@ -56,9 +69,11 @@ func (rb *redisBackend) Close() error {
|
||||
|
||||
// newRedis creates a statestore.Service backed by Redis database.
|
||||
func newRedis(cfg config.View) Service {
|
||||
pool := GetRedisPool(cfg)
|
||||
redsync = rs.New(rsredigo.NewPool(pool))
|
||||
return &redisBackend{
|
||||
healthCheckPool: getHealthCheckPool(cfg),
|
||||
redisPool: GetRedisPool(cfg),
|
||||
redisPool: pool,
|
||||
cfg: cfg,
|
||||
}
|
||||
}
|
||||
@ -84,7 +99,7 @@ func getHealthCheckPool(cfg config.View) *redis.Pool {
|
||||
Wait: true,
|
||||
TestOnBorrow: testOnBorrow,
|
||||
DialContext: func(ctx context.Context) (redis.Conn, error) {
|
||||
if ctx.Err() != nil {
|
||||
if ctx != nil && ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
return redis.DialURL(healthCheckURL, redis.DialConnectTimeout(healthCheckTimeout), redis.DialReadTimeout(healthCheckTimeout))
|
||||
@ -102,7 +117,7 @@ func GetRedisPool(cfg config.View) *redis.Pool {
|
||||
if cfg.IsSet("redis.sentinelHostname") {
|
||||
sentinelPool := getSentinelPool(cfg)
|
||||
dialFunc = func(ctx context.Context) (redis.Conn, error) {
|
||||
if ctx.Err() != nil {
|
||||
if ctx != nil && ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
@ -129,7 +144,7 @@ func GetRedisPool(cfg config.View) *redis.Pool {
|
||||
masterAddr := getMasterAddr(cfg)
|
||||
masterURL := redisURLFromAddr(masterAddr, cfg, cfg.GetBool("redis.usePassword"))
|
||||
dialFunc = func(ctx context.Context) (redis.Conn, error) {
|
||||
if ctx.Err() != nil {
|
||||
if ctx != nil && ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
return redis.DialURL(masterURL, redis.DialConnectTimeout(idleTimeout), redis.DialReadTimeout(idleTimeout))
|
||||
@ -160,7 +175,7 @@ func getSentinelPool(cfg config.View) *redis.Pool {
|
||||
Wait: true,
|
||||
TestOnBorrow: testOnBorrow,
|
||||
DialContext: func(ctx context.Context) (redis.Conn, error) {
|
||||
if ctx.Err() != nil {
|
||||
if ctx != nil && ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
redisLogger.WithField("sentinelAddr", sentinelAddr).Debug("Attempting to connect to Redis Sentinel")
|
||||
@ -223,395 +238,6 @@ func redisURLFromAddr(addr string, cfg config.View, usePassword bool) string {
|
||||
return redisURL + addr
|
||||
}
|
||||
|
||||
// CreateTicket creates a new Ticket in the state storage. If the id already exists, it will be overwritten.
|
||||
func (rb *redisBackend) CreateTicket(ctx context.Context, ticket *pb.Ticket) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "CreateTicket, id: %s, failed to connect to redis: %v", ticket.GetId(), err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
value, err := proto.Marshal(ticket)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to marshal the ticket proto, id: %s", ticket.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
_, err = redisConn.Do("SET", ticket.GetId(), value)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to set the value for ticket, id: %s", ticket.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTicket gets the Ticket with the specified id from state storage. This method fails if the Ticket does not exist.
func (rb *redisBackend) GetTicket(ctx context.Context, id string) (*pb.Ticket, error) {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "GetTicket, id: %s, failed to connect to redis: %v", id, err)
	}
	defer handleConnectionClose(&redisConn)

	value, err := redis.Bytes(redisConn.Do("GET", id))
	if err != nil {
		// Return NotFound if redigo did not find the ticket in storage.
		if err == redis.ErrNil {
			msg := fmt.Sprintf("Ticket id: %s not found", id)
			return nil, status.Error(codes.NotFound, msg)
		}

		err = errors.Wrapf(err, "failed to get the ticket from state storage, id: %s", id)
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	// Defensive check: a nil payload is also treated as "not found".
	if value == nil {
		msg := fmt.Sprintf("Ticket id: %s not found", id)
		return nil, status.Error(codes.NotFound, msg)
	}

	// Deserialize the stored proto bytes back into a Ticket.
	ticket := &pb.Ticket{}
	err = proto.Unmarshal(value, ticket)
	if err != nil {
		err = errors.Wrapf(err, "failed to unmarshal the ticket proto, id: %s", id)
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	return ticket, nil
}
|
||||
|
||||
// DeleteTicket removes the Ticket with the specified id from state storage.
|
||||
func (rb *redisBackend) DeleteTicket(ctx context.Context, id string) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "DeleteTicket, id: %s, failed to connect to redis: %v", id, err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
_, err = redisConn.Do("DEL", id)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to delete the ticket from state storage, id: %s", id)
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IndexTicket indexes the Ticket id for the configured index fields.
func (rb *redisBackend) IndexTicket(ctx context.Context, ticket *pb.Ticket) error {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return status.Errorf(codes.Unavailable, "IndexTicket, id: %s, failed to connect to redis: %v", ticket.GetId(), err)
	}
	defer handleConnectionClose(&redisConn)

	// NOTE(review): Send only buffers the SADD; there is no explicit Flush
	// here. Presumably the buffer is flushed when the pooled connection is
	// returned via handleConnectionClose — confirm, otherwise the command
	// may never reach redis.
	err = redisConn.Send("SADD", allTickets, ticket.Id)
	if err != nil {
		err = errors.Wrapf(err, "failed to add ticket to all tickets, id: %s", ticket.Id)
		return status.Errorf(codes.Internal, "%v", err)
	}

	return nil
}
|
||||
|
||||
// DeindexTicket removes the indexing for the specified Ticket. Only the indexes are removed but the Ticket continues to exist.
func (rb *redisBackend) DeindexTicket(ctx context.Context, id string) error {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return status.Errorf(codes.Unavailable, "DeindexTicket, id: %s, failed to connect to redis: %v", id, err)
	}
	defer handleConnectionClose(&redisConn)

	// NOTE(review): Send only buffers the SREM; there is no explicit Flush
	// here. Presumably the buffer is flushed when the pooled connection is
	// returned via handleConnectionClose — confirm, otherwise the command
	// may never reach redis.
	err = redisConn.Send("SREM", allTickets, id)
	if err != nil {
		err = errors.Wrapf(err, "failed to remove ticket from all tickets, id: %s", id)
		return status.Errorf(codes.Internal, "%v", err)
	}

	return nil
}
|
||||
|
||||
// GetIndexedIDSet returns the ids of all tickets currently indexed,
// excluding tickets that were proposed within the pendingReleaseTimeout
// window (those are considered pending and not available).
func (rb *redisBackend) GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error) {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "GetIndexedIDSet, failed to connect to redis: %v", err)
	}
	defer handleConnectionClose(&redisConn)

	ttl := rb.cfg.GetDuration("pendingReleaseTimeout")
	curTime := time.Now()
	// Window of "recently proposed" tickets: [now - ttl, now + 1h].
	endTimeInt := curTime.Add(time.Hour).UnixNano()
	startTimeInt := curTime.Add(-ttl).UnixNano()

	// Filter out tickets that are fetched but not assigned within ttl time (ms).
	idsInPendingReleases, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", proposedTicketIDs, startTimeInt, endTimeInt))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "error getting pending release %v", err)
	}

	idsIndexed, err := redis.Strings(redisConn.Do("SMEMBERS", allTickets))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "error getting all indexed ticket ids %v", err)
	}

	// Start from every indexed id, then remove the pending ones.
	r := make(map[string]struct{}, len(idsIndexed))
	for _, id := range idsIndexed {
		r[id] = struct{}{}
	}
	for _, id := range idsInPendingReleases {
		delete(r, id)
	}

	return r, nil
}
|
||||
|
||||
// GetTickets returns multiple tickets from storage. Missing tickets are
// silently ignored.
func (rb *redisBackend) GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error) {
	// Nothing requested: avoid issuing an empty MGET.
	if len(ids) == 0 {
		return nil, nil
	}

	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "GetTickets, failed to connect to redis: %v", err)
	}
	defer handleConnectionClose(&redisConn)

	// MGET takes variadic interface{} arguments, so copy the ids over.
	queryParams := make([]interface{}, len(ids))
	for i, id := range ids {
		queryParams[i] = id
	}

	ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", queryParams...))
	if err != nil {
		err = errors.Wrapf(err, "failed to lookup tickets %v", ids)
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	r := make([]*pb.Ticket, 0, len(ids))

	for i, b := range ticketBytes {
		// Tickets may be deleted by the time we read it from redis.
		if b != nil {
			t := &pb.Ticket{}
			err = proto.Unmarshal(b, t)
			if err != nil {
				err = errors.Wrapf(err, "failed to unmarshal ticket from redis, key %s", ids[i])
				return nil, status.Errorf(codes.Internal, "%v", err)
			}
			r = append(r, t)
		}
	}

	return r, nil
}
|
||||
|
||||
// UpdateAssignments update using the request's specified tickets with assignments.
|
||||
func (rb *redisBackend) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, []*pb.Ticket, error) {
|
||||
resp := &pb.AssignTicketsResponse{}
|
||||
if len(req.Assignments) == 0 {
|
||||
return resp, []*pb.Ticket{}, nil
|
||||
}
|
||||
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "UpdateAssignments, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
idToA := make(map[string]*pb.Assignment)
|
||||
ids := make([]string, 0)
|
||||
idsI := make([]interface{}, 0)
|
||||
for _, a := range req.Assignments {
|
||||
if a.Assignment == nil {
|
||||
return nil, nil, status.Error(codes.InvalidArgument, "AssignmentGroup.Assignment is required")
|
||||
}
|
||||
|
||||
for _, id := range a.TicketIds {
|
||||
if _, ok := idToA[id]; ok {
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "Ticket id %s is assigned multiple times in one assign tickets call", id)
|
||||
}
|
||||
|
||||
idToA[id] = a.Assignment
|
||||
ids = append(ids, id)
|
||||
idsI = append(idsI, id)
|
||||
}
|
||||
}
|
||||
|
||||
ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", idsI...))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
tickets := make([]*pb.Ticket, 0, len(ticketBytes))
|
||||
for i, ticketByte := range ticketBytes {
|
||||
// Tickets may be deleted by the time we read it from redis.
|
||||
if ticketByte == nil {
|
||||
resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
|
||||
TicketId: ids[i],
|
||||
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
|
||||
})
|
||||
} else {
|
||||
t := &pb.Ticket{}
|
||||
err = proto.Unmarshal(ticketByte, t)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to unmarshal ticket from redis %s", ids[i])
|
||||
return nil, nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
tickets = append(tickets, t)
|
||||
}
|
||||
}
|
||||
assignmentTimeout := rb.cfg.GetDuration("assignedDeleteTimeout") / time.Millisecond
|
||||
err = redisConn.Send("MULTI")
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error starting redis multi")
|
||||
}
|
||||
|
||||
for _, ticket := range tickets {
|
||||
ticket.Assignment = idToA[ticket.Id]
|
||||
|
||||
var ticketByte []byte
|
||||
ticketByte, err = proto.Marshal(ticket)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.Internal, "failed to marshal ticket %s", ticket.GetId())
|
||||
}
|
||||
|
||||
err = redisConn.Send("SET", ticket.Id, ticketByte, "PX", int64(assignmentTimeout), "XX")
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error sending ticket assignment set")
|
||||
}
|
||||
}
|
||||
|
||||
wasSet, err := redis.Values(redisConn.Do("EXEC"))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error executing assignment set")
|
||||
}
|
||||
|
||||
if len(wasSet) != len(tickets) {
|
||||
return nil, nil, status.Errorf(codes.Internal, "sent %d tickets to redis, but received %d back", len(tickets), len(wasSet))
|
||||
}
|
||||
|
||||
assignedTickets := make([]*pb.Ticket, 0, len(tickets))
|
||||
for i, ticket := range tickets {
|
||||
v, err := redis.String(wasSet[i], nil)
|
||||
if err == redis.ErrNil {
|
||||
resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
|
||||
TicketId: ticket.Id,
|
||||
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
|
||||
})
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unexpected error from redis multi set")
|
||||
}
|
||||
if v != "OK" {
|
||||
return nil, nil, status.Errorf(codes.Internal, "unexpected response from redis: %s", v)
|
||||
}
|
||||
assignedTickets = append(assignedTickets, ticket)
|
||||
}
|
||||
|
||||
return resp, assignedTickets, nil
|
||||
}
|
||||
|
||||
// GetAssignments returns the assignment associated with the input ticket id
|
||||
func (rb *redisBackend) GetAssignments(ctx context.Context, id string, callback func(*pb.Assignment) error) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "GetAssignments, id: %s, failed to connect to redis: %v", id, err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
backoffOperation := func() error {
|
||||
var ticket *pb.Ticket
|
||||
ticket, err = rb.GetTicket(ctx, id)
|
||||
if err != nil {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
|
||||
err = callback(ticket.GetAssignment())
|
||||
if err != nil {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
|
||||
return status.Error(codes.Unavailable, "listening on assignment updates, waiting for the next backoff")
|
||||
}
|
||||
|
||||
err = backoff.Retry(backoffOperation, rb.newConstantBackoffStrategy())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddTicketsToPendingRelease appends new proposed tickets to the proposed sorted set with current timestamp
|
||||
func (rb *redisBackend) AddTicketsToPendingRelease(ctx context.Context, ids []string) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "AddTicketsToPendingRelease, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
currentTime := time.Now().UnixNano()
|
||||
cmds := make([]interface{}, 0, 2*len(ids)+1)
|
||||
cmds = append(cmds, proposedTicketIDs)
|
||||
for _, id := range ids {
|
||||
cmds = append(cmds, currentTime, id)
|
||||
}
|
||||
|
||||
_, err = redisConn.Do("ZADD", cmds...)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to append proposed tickets to pending release")
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTicketsFromPendingRelease deletes tickets from the proposed sorted set
|
||||
func (rb *redisBackend) DeleteTicketsFromPendingRelease(ctx context.Context, ids []string) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "DeleteTicketsFromPendingRelease, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
cmds := make([]interface{}, 0, len(ids)+1)
|
||||
cmds = append(cmds, proposedTicketIDs)
|
||||
for _, id := range ids {
|
||||
cmds = append(cmds, id)
|
||||
}
|
||||
|
||||
_, err = redisConn.Do("ZREM", cmds...)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to delete proposed tickets from pending release")
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rb *redisBackend) ReleaseAllTickets(ctx context.Context) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "ReleaseAllTickets, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
_, err = redisConn.Do("DEL", proposedTicketIDs)
|
||||
return err
|
||||
}
|
||||
|
||||
func handleConnectionClose(conn *redis.Conn) {
|
||||
err := (*conn).Close()
|
||||
if err != nil {
|
||||
@ -620,20 +246,3 @@ func handleConnectionClose(conn *redis.Conn) {
|
||||
}).Debug("failed to close redis client connection.")
|
||||
}
|
||||
}
|
||||
|
||||
func (rb *redisBackend) newConstantBackoffStrategy() backoff.BackOff {
|
||||
backoffStrat := backoff.NewConstantBackOff(rb.cfg.GetDuration("backoff.initialInterval"))
|
||||
return backoff.BackOff(backoffStrat)
|
||||
}
|
||||
|
||||
// TODO: add cache the backoff object
|
||||
// nolint: unused
|
||||
func (rb *redisBackend) newExponentialBackoffStrategy() backoff.BackOff {
|
||||
backoffStrat := backoff.NewExponentialBackOff()
|
||||
backoffStrat.InitialInterval = rb.cfg.GetDuration("backoff.initialInterval")
|
||||
backoffStrat.RandomizationFactor = rb.cfg.GetFloat64("backoff.randFactor")
|
||||
backoffStrat.Multiplier = rb.cfg.GetFloat64("backoff.multiplier")
|
||||
backoffStrat.MaxInterval = rb.cfg.GetDuration("backoff.maxInterval")
|
||||
backoffStrat.MaxElapsedTime = rb.cfg.GetDuration("backoff.maxElapsedTime")
|
||||
return backoff.BackOff(backoffStrat)
|
||||
}
|
||||
|
@ -1,175 +1,14 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package statestore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Bose/minisentinel"
|
||||
miniredis "github.com/alicebob/miniredis/v2"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
"github.com/rs/xid"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
utilTesting "open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
func TestStatestoreSetup(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
}
|
||||
|
||||
func TestTicketLifecycle(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
// Initialize test data
|
||||
id := xid.New().String()
|
||||
ticket := &pb.Ticket{
|
||||
Id: id,
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
"testindex1": 42,
|
||||
},
|
||||
},
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: "test-tbd",
|
||||
},
|
||||
}
|
||||
|
||||
// Validate that GetTicket fails for a Ticket that does not exist.
|
||||
_, err := service.GetTicket(ctx, id)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, status.Code(err), codes.NotFound)
|
||||
|
||||
// Validate nonexisting Ticket deletion
|
||||
err = service.DeleteTicket(ctx, id)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Validate nonexisting Ticket deindexing
|
||||
err = service.DeindexTicket(ctx, id)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Validate Ticket creation
|
||||
err = service.CreateTicket(ctx, ticket)
|
||||
require.Nil(t, err)
|
||||
|
||||
// Validate Ticket retrival
|
||||
result, err := service.GetTicket(ctx, ticket.Id)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, ticket.Id, result.Id)
|
||||
require.Equal(t, ticket.SearchFields.DoubleArgs["testindex1"], result.SearchFields.DoubleArgs["testindex1"])
|
||||
require.NotNil(t, result.Assignment)
|
||||
require.Equal(t, ticket.Assignment.Connection, result.Assignment.Connection)
|
||||
|
||||
// Validate Ticket deletion
|
||||
err = service.DeleteTicket(ctx, id)
|
||||
require.Nil(t, err)
|
||||
|
||||
_, err = service.GetTicket(ctx, id)
|
||||
require.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestGetAssignmentBeforeSet(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
var assignmentResp *pb.Assignment
|
||||
|
||||
err := service.GetAssignments(ctx, "id", func(assignment *pb.Assignment) error {
|
||||
assignmentResp = assignment
|
||||
return nil
|
||||
})
|
||||
// GetAssignment failed because the ticket does not exists
|
||||
require.Equal(t, status.Convert(err).Code(), codes.NotFound)
|
||||
require.Nil(t, assignmentResp)
|
||||
}
|
||||
|
||||
func TestGetAssignmentNormal(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
err := service.CreateTicket(ctx, &pb.Ticket{
|
||||
Id: "1",
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
})
|
||||
require.Nil(t, err)
|
||||
|
||||
var assignmentResp *pb.Assignment
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
callbackCount := 0
|
||||
returnedErr := errors.New("some errors")
|
||||
|
||||
err = service.GetAssignments(ctx, "1", func(assignment *pb.Assignment) error {
|
||||
assignmentResp = assignment
|
||||
|
||||
if callbackCount == 5 {
|
||||
cancel()
|
||||
return returnedErr
|
||||
} else if callbackCount > 0 {
|
||||
// Test the assignment returned was successfully passed in to the callback function
|
||||
require.Equal(t, assignmentResp.Connection, "2")
|
||||
}
|
||||
|
||||
callbackCount++
|
||||
return nil
|
||||
})
|
||||
|
||||
// Test GetAssignments was retried for 5 times and returned with expected error
|
||||
require.Equal(t, 5, callbackCount)
|
||||
require.Equal(t, returnedErr, err)
|
||||
|
||||
// Pass an expired context, err expected
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.GetAssignments(ctx, "1", func(assignment *pb.Assignment) error { return nil })
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "GetAssignments, id: 1, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestUpdateAssignments(t *testing.T) {
|
||||
func TestNewMutex(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
@ -177,795 +16,18 @@ func TestUpdateAssignments(t *testing.T) {
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
err := service.CreateTicket(ctx, &pb.Ticket{
|
||||
Id: "1",
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
})
|
||||
require.Nil(t, err)
|
||||
mutex := service.NewMutex("key")
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
_, err = c.Do("SET", "wrong-type-key", "wrong-type-value")
|
||||
err := mutex.Lock(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
type expected struct {
|
||||
resp *pb.AssignTicketsResponse
|
||||
errCode codes.Code
|
||||
errMessage string
|
||||
assignedTicketsIDs []string
|
||||
}
|
||||
err = service.CreateBackfill(ctx, &pb.Backfill{
|
||||
Id: "222",
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
var testCases = []struct {
|
||||
description string
|
||||
request *pb.AssignTicketsRequest
|
||||
expected
|
||||
}{
|
||||
{
|
||||
description: "no assignments, empty response is returned",
|
||||
request: &pb.AssignTicketsRequest{},
|
||||
expected: expected{
|
||||
resp: &pb.AssignTicketsResponse{},
|
||||
errCode: codes.OK,
|
||||
errMessage: "",
|
||||
assignedTicketsIDs: []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "updated assignments, no errors",
|
||||
request: &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{"1"},
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: expected{
|
||||
resp: &pb.AssignTicketsResponse{},
|
||||
errCode: codes.OK,
|
||||
errMessage: "",
|
||||
assignedTicketsIDs: []string{"1"},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "nil assignment, error expected",
|
||||
request: &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{"1"},
|
||||
Assignment: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: expected{
|
||||
resp: nil,
|
||||
errCode: codes.InvalidArgument,
|
||||
errMessage: "AssignmentGroup.Assignment is required",
|
||||
assignedTicketsIDs: []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "ticket is assigned multiple times, error expected",
|
||||
request: &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{"1"},
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
},
|
||||
{
|
||||
TicketIds: []string{"1"},
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: expected{
|
||||
resp: nil,
|
||||
errCode: codes.InvalidArgument,
|
||||
errMessage: "Ticket id 1 is assigned multiple times in one assign tickets call",
|
||||
assignedTicketsIDs: []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "ticket doesn't exist, no error, response failure expected",
|
||||
request: &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{"11111"},
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: expected{
|
||||
resp: &pb.AssignTicketsResponse{
|
||||
Failures: []*pb.AssignmentFailure{{
|
||||
TicketId: "11111",
|
||||
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
|
||||
}},
|
||||
},
|
||||
errCode: codes.OK,
|
||||
errMessage: "",
|
||||
assignedTicketsIDs: []string{},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "wrong value, error expected",
|
||||
request: &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{"wrong-type-key"},
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: expected{
|
||||
resp: nil,
|
||||
errCode: codes.Internal,
|
||||
errMessage: "failed to unmarshal ticket from redis wrong-type-key",
|
||||
assignedTicketsIDs: []string{},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := mutex.Unlock(ctx)
|
||||
require.NoError(t, err)
|
||||
require.True(t, b)
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
resp, ticketsAssignedActual, errActual := service.UpdateAssignments(ctx, tc.request)
|
||||
if tc.expected.errCode != codes.OK {
|
||||
require.Error(t, errActual)
|
||||
require.Equal(t, tc.expected.errCode.String(), status.Convert(errActual).Code().String())
|
||||
require.Contains(t, status.Convert(errActual).Message(), tc.expected.errMessage)
|
||||
} else {
|
||||
require.NoError(t, errActual)
|
||||
require.Equal(t, tc.expected.resp, resp)
|
||||
require.Equal(t, len(tc.expected.assignedTicketsIDs), len(ticketsAssignedActual))
|
||||
|
||||
for _, ticket := range ticketsAssignedActual {
|
||||
found := false
|
||||
for _, id := range tc.expected.assignedTicketsIDs {
|
||||
if ticket.GetId() == id {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Truef(t, found, "assigned ticket ID %s is not found in an expected slice", ticket.GetId())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
_, _, err = service.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{"11111"},
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "UpdateAssignments, failed to connect to redis: context canceled")
|
||||
}
|
||||
|
||||
func TestConnect(t *testing.T) {
|
||||
testConnect(t, false, "")
|
||||
testConnect(t, false, "redispassword")
|
||||
testConnect(t, true, "")
|
||||
testConnect(t, true, "redispassword")
|
||||
}
|
||||
|
||||
func TestHealthCheck(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
// OK
|
||||
ctx := utilTesting.NewContext(t)
|
||||
err := service.HealthCheck(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Error expected
|
||||
closer()
|
||||
err = service.HealthCheck(ctx)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
}
|
||||
|
||||
func TestCreateTicket(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
var testCases = []struct {
|
||||
description string
|
||||
ticket *pb.Ticket
|
||||
expectedCode codes.Code
|
||||
expectedMessage string
|
||||
}{
|
||||
{
|
||||
description: "ok",
|
||||
ticket: &pb.Ticket{
|
||||
Id: "1",
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
},
|
||||
expectedCode: codes.OK,
|
||||
expectedMessage: "",
|
||||
},
|
||||
{
|
||||
description: "nil ticket passed, err expected",
|
||||
ticket: nil,
|
||||
expectedCode: codes.Internal,
|
||||
expectedMessage: "failed to marshal the ticket proto, id: : proto: Marshal called with nil",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := service.CreateTicket(ctx, tc.ticket)
|
||||
if tc.expectedCode == codes.OK {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err := service.CreateTicket(ctx, &pb.Ticket{
|
||||
Id: "222",
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "CreateTicket, id: 222, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestGetTicket(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
err := service.CreateTicket(ctx, &pb.Ticket{
|
||||
Id: "mockTicketID",
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
_, err = c.Do("SET", "wrong-type-key", "wrong-type-value")
|
||||
require.NoError(t, err)
|
||||
|
||||
var testCases = []struct {
|
||||
description string
|
||||
ticketID string
|
||||
expectedCode codes.Code
|
||||
expectedMessage string
|
||||
}{
|
||||
{
|
||||
description: "ticket is found",
|
||||
ticketID: "mockTicketID",
|
||||
expectedCode: codes.OK,
|
||||
expectedMessage: "",
|
||||
},
|
||||
{
|
||||
description: "empty id passed, err expected",
|
||||
ticketID: "",
|
||||
expectedCode: codes.NotFound,
|
||||
expectedMessage: "Ticket id: not found",
|
||||
},
|
||||
{
|
||||
description: "wrong id passed, err expected",
|
||||
ticketID: "123456",
|
||||
expectedCode: codes.NotFound,
|
||||
expectedMessage: "Ticket id: 123456 not found",
|
||||
},
|
||||
{
|
||||
description: "item of a wrong type is requested, err expected",
|
||||
ticketID: "wrong-type-key",
|
||||
expectedCode: codes.Internal,
|
||||
expectedMessage: "failed to unmarshal the ticket proto, id: wrong-type-key: proto: can't skip unknown wire type",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
ticketActual, errActual := service.GetTicket(ctx, tc.ticketID)
|
||||
if tc.expectedCode == codes.OK {
|
||||
require.NoError(t, errActual)
|
||||
require.NotNil(t, ticketActual)
|
||||
} else {
|
||||
require.Error(t, errActual)
|
||||
require.Equal(t, tc.expectedCode.String(), status.Convert(errActual).Code().String())
|
||||
require.Contains(t, status.Convert(errActual).Message(), tc.expectedMessage)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
res, err := service.GetTicket(ctx, "12345")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, res)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "GetTicket, id: 12345, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestDeleteTicket(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
err := service.CreateTicket(ctx, &pb.Ticket{
|
||||
Id: "mockTicketID",
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
var testCases = []struct {
|
||||
description string
|
||||
ticketID string
|
||||
expectedCode codes.Code
|
||||
expectedMessage string
|
||||
}{
|
||||
{
|
||||
description: "ticket is found and deleted",
|
||||
ticketID: "mockTicketID",
|
||||
expectedCode: codes.OK,
|
||||
expectedMessage: "",
|
||||
},
|
||||
{
|
||||
description: "empty id passed, no err expected",
|
||||
ticketID: "",
|
||||
expectedCode: codes.OK,
|
||||
expectedMessage: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
errActual := service.DeleteTicket(ctx, tc.ticketID)
|
||||
if tc.expectedCode == codes.OK {
|
||||
require.NoError(t, errActual)
|
||||
|
||||
if tc.ticketID != "" {
|
||||
_, errGetTicket := service.GetTicket(ctx, tc.ticketID)
|
||||
require.Error(t, errGetTicket)
|
||||
require.Equal(t, codes.NotFound.String(), status.Convert(errGetTicket).Code().String())
|
||||
}
|
||||
} else {
|
||||
require.Error(t, errActual)
|
||||
require.Equal(t, tc.expectedCode.String(), status.Convert(errActual).Code().String())
|
||||
require.Contains(t, status.Convert(errActual).Message(), tc.expectedMessage)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.DeleteTicket(ctx, "12345")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "DeleteTicket, id: 12345, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestIndexTicket(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
generateTickets(ctx, t, service, 2)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err := redis.Strings(c.Do("SMEMBERS", "allTickets"))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 2)
|
||||
require.Equal(t, "mockTicketID-0", idsIndexed[0])
|
||||
require.Equal(t, "mockTicketID-1", idsIndexed[1])
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.IndexTicket(ctx, &pb.Ticket{
|
||||
Id: "12345",
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "IndexTicket, id: 12345, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestDeindexTicket(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
generateTickets(ctx, t, service, 2)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err := redis.Strings(c.Do("SMEMBERS", "allTickets"))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 2)
|
||||
require.Equal(t, "mockTicketID-0", idsIndexed[0])
|
||||
require.Equal(t, "mockTicketID-1", idsIndexed[1])
|
||||
|
||||
// deindex and check that there is only 1 ticket in the returned slice
|
||||
err = service.DeindexTicket(ctx, "mockTicketID-1")
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err = redis.Strings(c.Do("SMEMBERS", "allTickets"))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 1)
|
||||
require.Equal(t, "mockTicketID-0", idsIndexed[0])
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.DeindexTicket(ctx, "12345")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "DeindexTicket, id: 12345, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestGetIndexedIDSet(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
tickets, _ := generateTickets(ctx, t, service, 2)
|
||||
|
||||
verifyTickets := func(service Service, tickets []*pb.Ticket) {
|
||||
ids, err := service.GetIndexedIDSet(ctx)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, len(tickets), len(ids))
|
||||
|
||||
for _, tt := range tickets {
|
||||
_, ok := ids[tt.GetId()]
|
||||
require.True(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify all tickets are created and returned
|
||||
verifyTickets(service, tickets)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
// Add the first ticket to the pending release and verify changes are reflected in the result
|
||||
redis.Strings(c.Do("ZADD", "proposed_ticket_ids", time.Now().UnixNano(), "mockTicketID-0"))
|
||||
|
||||
verifyTickets(service, tickets[1:2])
|
||||
|
||||
// Sleep until the pending release expired and verify we still have all the tickets
|
||||
time.Sleep(cfg.GetDuration("pendingReleaseTimeout"))
|
||||
verifyTickets(service, tickets)
|
||||
|
||||
// Pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
_, err = service.GetIndexedIDSet(ctx)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "GetIndexedIDSet, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestGetTickets(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
tickets, ids := generateTickets(ctx, t, service, 2)
|
||||
|
||||
res, err := service.GetTickets(ctx, ids)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, tc := range tickets {
|
||||
require.Equal(t, tc.GetId(), res[i].GetId())
|
||||
}
|
||||
|
||||
// pass empty ids slice
|
||||
empty := []string{}
|
||||
res, err = service.GetTickets(ctx, empty)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, res)
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
_, err = service.GetTickets(ctx, ids)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "GetTickets, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestDeleteTicketsFromPendingRelease(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
tickets, ids := generateTickets(ctx, t, service, 2)
|
||||
|
||||
verifyTickets := func(service Service, tickets []*pb.Ticket) {
|
||||
ids, err := service.GetIndexedIDSet(ctx)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, len(tickets), len(ids))
|
||||
|
||||
for _, tt := range tickets {
|
||||
_, ok := ids[tt.GetId()]
|
||||
require.True(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify all tickets are created and returned
|
||||
verifyTickets(service, tickets)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
// Add the first ticket to the pending release and verify changes are reflected in the result
|
||||
redis.Strings(c.Do("ZADD", "proposed_ticket_ids", time.Now().UnixNano(), ids[0]))
|
||||
|
||||
// Verify 1 ticket is indexed
|
||||
verifyTickets(service, tickets[1:2])
|
||||
|
||||
require.NoError(t, service.DeleteTicketsFromPendingRelease(ctx, ids[:1]))
|
||||
|
||||
// Verify that ticket is deleted from indexed set
|
||||
verifyTickets(service, tickets)
|
||||
|
||||
// Pass an empty ids slice
|
||||
empty := []string{}
|
||||
require.NoError(t, service.DeleteTicketsFromPendingRelease(ctx, empty))
|
||||
|
||||
// Pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.DeleteTicketsFromPendingRelease(ctx, ids)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "DeleteTicketsFromPendingRelease, failed to connect to redis:")
|
||||
}
|
||||
|
||||
func TestReleaseAllTickets(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
tickets, ids := generateTickets(ctx, t, service, 2)
|
||||
|
||||
verifyTickets := func(service Service, tickets []*pb.Ticket) {
|
||||
ids, err := service.GetIndexedIDSet(ctx)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, len(tickets), len(ids))
|
||||
|
||||
for _, tt := range tickets {
|
||||
_, ok := ids[tt.GetId()]
|
||||
require.True(t, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify all tickets are created and returned
|
||||
verifyTickets(service, tickets)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
// Add the first ticket to the pending release and verify changes are reflected in the result
|
||||
redis.Strings(c.Do("ZADD", "proposed_ticket_ids", time.Now().UnixNano(), ids[0]))
|
||||
|
||||
// Verify 1 ticket is indexed
|
||||
verifyTickets(service, tickets[1:2])
|
||||
|
||||
require.NoError(t, service.ReleaseAllTickets(ctx))
|
||||
|
||||
// Verify that ticket is deleted from indexed set
|
||||
verifyTickets(service, tickets)
|
||||
|
||||
// Pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.ReleaseAllTickets(ctx)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "ReleaseAllTickets, failed to connect to redis:")
|
||||
}
|
||||
|
||||
// TestAddTicketsToPendingRelease verifies that tickets moved into the
// pending-release state disappear from the indexed id set, that an empty id
// slice is a no-op, and that a canceled context yields Unavailable.
func TestAddTicketsToPendingRelease(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	tickets, ids := generateTickets(ctx, t, service, 2)

	// verifyTickets asserts that exactly the given tickets are indexed.
	// NOTE: the closure captures the outer ctx variable, which is reassigned
	// to a canceled context near the end of the test — it must not be called
	// after that point.
	verifyTickets := func(service Service, tickets []*pb.Ticket) {
		ids, err := service.GetIndexedIDSet(ctx)
		require.Nil(t, err)
		require.Equal(t, len(tickets), len(ids))

		for _, tt := range tickets {
			_, ok := ids[tt.GetId()]
			require.True(t, ok)
		}
	}

	// Verify all tickets are created and returned
	verifyTickets(service, tickets)

	// Add 1st ticket to pending release state
	require.NoError(t, service.AddTicketsToPendingRelease(ctx, ids[:1]))

	// Verify 1 ticket is indexed
	verifyTickets(service, tickets[1:2])

	// Pass an empty ids slice
	empty := []string{}
	require.NoError(t, service.AddTicketsToPendingRelease(ctx, empty))

	// Pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err := service.AddTicketsToPendingRelease(ctx, ids)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "AddTicketsToPendingRelease, failed to connect to redis:")
}
|
||||
|
||||
// testConnect is a shared helper that builds a store against the given redis
// configuration (optionally sentinel-fronted and/or password-protected),
// unwraps it down to the raw redigo pool, and confirms connectivity via PING.
func testConnect(t *testing.T, withSentinel bool, withPassword string) {
	cfg, closer := createRedis(t, withSentinel, withPassword)
	defer closer()
	store := New(cfg)
	defer store.Close()
	ctx := utilTesting.NewContext(t)

	// New wraps the redis backend in an instrumentedService; peel both
	// layers off to reach the underlying connection pool.
	is, ok := store.(*instrumentedService)
	require.True(t, ok)
	rb, ok := is.s.(*redisBackend)
	require.True(t, ok)

	conn, err := rb.redisPool.GetContext(ctx)
	require.NoError(t, err)
	require.NotNil(t, conn)

	// PING proves the pool actually reaches the configured server.
	rply, err := redis.String(conn.Do("PING"))
	require.Nil(t, err)
	require.Equal(t, "PONG", rply)
}
|
||||
|
||||
func createRedis(t *testing.T, withSentinel bool, withPassword string) (config.View, func()) {
|
||||
cfg := viper.New()
|
||||
closerFuncs := []func(){}
|
||||
mredis := miniredis.NewMiniRedis()
|
||||
err := mredis.StartAddr("localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to start miniredis, %v", err)
|
||||
}
|
||||
closerFuncs = append(closerFuncs, mredis.Close)
|
||||
|
||||
cfg.Set("redis.pool.maxIdle", 5)
|
||||
cfg.Set("redis.pool.idleTimeout", time.Second)
|
||||
cfg.Set("redis.pool.healthCheckTimeout", 100*time.Millisecond)
|
||||
cfg.Set("redis.pool.maxActive", 5)
|
||||
cfg.Set("pendingReleaseTimeout", "200ms")
|
||||
cfg.Set("backoff.initialInterval", 100*time.Millisecond)
|
||||
cfg.Set("backoff.randFactor", 0.5)
|
||||
cfg.Set("backoff.multiplier", 0.5)
|
||||
cfg.Set("backoff.maxInterval", 300*time.Millisecond)
|
||||
cfg.Set("backoff.maxElapsedTime", 100*time.Millisecond)
|
||||
cfg.Set(telemetry.ConfigNameEnableMetrics, true)
|
||||
cfg.Set("assignedDeleteTimeout", 1000*time.Millisecond)
|
||||
|
||||
if withSentinel {
|
||||
s := minisentinel.NewSentinel(mredis)
|
||||
err = s.StartAddr("localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to start minisentinel, %v", err)
|
||||
}
|
||||
|
||||
closerFuncs = append(closerFuncs, s.Close)
|
||||
cfg.Set("redis.sentinelHostname", s.Host())
|
||||
cfg.Set("redis.sentinelPort", s.Port())
|
||||
cfg.Set("redis.sentinelMaster", s.MasterInfo().Name)
|
||||
cfg.Set("redis.sentinelEnabled", true)
|
||||
// TODO: enable sentinel auth test cases when the library support it.
|
||||
cfg.Set("redis.sentinelUsePassword", false)
|
||||
} else {
|
||||
cfg.Set("redis.hostname", mredis.Host())
|
||||
cfg.Set("redis.port", mredis.Port())
|
||||
}
|
||||
|
||||
if len(withPassword) > 0 {
|
||||
mredis.RequireAuth(withPassword)
|
||||
tmpFile, err := ioutil.TempFile("", "password")
|
||||
if err != nil {
|
||||
t.Fatal("failed to create temp file for password")
|
||||
}
|
||||
if _, err := tmpFile.WriteString(withPassword); err != nil {
|
||||
t.Fatal("failed to write pw to temp file")
|
||||
}
|
||||
|
||||
closerFuncs = append(closerFuncs, func() { os.Remove(tmpFile.Name()) })
|
||||
cfg.Set("redis.usePassword", true)
|
||||
cfg.Set("redis.passwordPath", tmpFile.Name())
|
||||
}
|
||||
|
||||
return cfg, func() {
|
||||
for _, closer := range closerFuncs {
|
||||
closer()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//nolint: unparam
|
||||
// generateTickets creates a proper amount of ticket, returns a slice of tickets and a slice of tickets ids
|
||||
func generateTickets(ctx context.Context, t *testing.T, service Service, amount int) ([]*pb.Ticket, []string) {
|
||||
tickets := make([]*pb.Ticket, 0, amount)
|
||||
ids := make([]string, 0, amount)
|
||||
|
||||
for i := 0; i < amount; i++ {
|
||||
tmp := &pb.Ticket{
|
||||
Id: fmt.Sprintf("mockTicketID-%d", i),
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
}
|
||||
require.NoError(t, service.CreateTicket(ctx, tmp))
|
||||
require.NoError(t, service.IndexTicket(ctx, tmp))
|
||||
tickets = append(tickets, tmp)
|
||||
ids = append(ids, tmp.GetId())
|
||||
}
|
||||
return tickets, ids
|
||||
}
|
||||
|
@ -45,6 +45,7 @@ func New(t *testing.T, cfg config.Mutable) func() {
|
||||
cfg.Set("redis.pool.maxActive", PoolMaxActive)
|
||||
cfg.Set("redis.pool.idleTimeout", PoolIdleTimeout)
|
||||
cfg.Set("redis.pool.healthCheckTimeout", PoolHealthCheckTimeout)
|
||||
cfg.Set("backfillLockTimeout", "1m")
|
||||
cfg.Set("pendingReleaseTimeout", pendingReleaseTimeout)
|
||||
cfg.Set("assignedDeleteTimeout", assignedDeleteTimeout)
|
||||
cfg.Set("backoff.initialInterval", InitialInterval)
|
||||
|
448
internal/statestore/ticket.go
Normal file
448
internal/statestore/ticket.go
Normal file
@ -0,0 +1,448 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package statestore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
// Redis key names used by the ticket state storage.
const (
	// allTickets is the redis SET holding the id of every indexed ticket.
	allTickets = "allTickets"
	// proposedTicketIDs is the redis sorted set (scored by UnixNano
	// timestamp) of tickets proposed for a match and pending release.
	proposedTicketIDs = "proposed_ticket_ids"
)
|
||||
|
||||
// CreateTicket creates a new Ticket in the state storage. If the id already exists, it will be overwritten.
|
||||
func (rb *redisBackend) CreateTicket(ctx context.Context, ticket *pb.Ticket) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "CreateTicket, id: %s, failed to connect to redis: %v", ticket.GetId(), err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
value, err := proto.Marshal(ticket)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to marshal the ticket proto, id: %s", ticket.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
_, err = redisConn.Do("SET", ticket.GetId(), value)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to set the value for ticket, id: %s", ticket.GetId())
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTicket gets the Ticket with the specified id from state storage. This method fails if the Ticket does not exist.
//
// Returns codes.NotFound when the key is absent, codes.Unavailable when a
// pool connection cannot be acquired, and codes.Internal on redis or
// unmarshal failures.
func (rb *redisBackend) GetTicket(ctx context.Context, id string) (*pb.Ticket, error) {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "GetTicket, id: %s, failed to connect to redis: %v", id, err)
	}
	defer handleConnectionClose(&redisConn)

	value, err := redis.Bytes(redisConn.Do("GET", id))
	if err != nil {
		// Return NotFound if redigo did not find the ticket in storage.
		if err == redis.ErrNil {
			msg := fmt.Sprintf("Ticket id: %s not found", id)
			return nil, status.Error(codes.NotFound, msg)
		}

		err = errors.Wrapf(err, "failed to get the ticket from state storage, id: %s", id)
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	// Defensive check: a missing key normally surfaces as redis.ErrNil above,
	// but a nil payload is treated as NotFound as well.
	if value == nil {
		msg := fmt.Sprintf("Ticket id: %s not found", id)
		return nil, status.Error(codes.NotFound, msg)
	}

	ticket := &pb.Ticket{}
	err = proto.Unmarshal(value, ticket)
	if err != nil {
		err = errors.Wrapf(err, "failed to unmarshal the ticket proto, id: %s", id)
		return nil, status.Errorf(codes.Internal, "%v", err)
	}

	return ticket, nil
}
|
||||
|
||||
// DeleteTicket removes the Ticket with the specified id from state storage.
|
||||
func (rb *redisBackend) DeleteTicket(ctx context.Context, id string) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "DeleteTicket, id: %s, failed to connect to redis: %v", id, err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
value, err := redis.Int(redisConn.Do("DEL", id))
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to delete the ticket from state storage, id: %s", id)
|
||||
return status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
if value == 0 {
|
||||
return status.Errorf(codes.NotFound, "Ticket id: %s not found", id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IndexTicket indexes the Ticket id for the configured index fields.
// Concretely this only adds the id to the allTickets SET; queryability by
// search fields is handled elsewhere.
func (rb *redisBackend) IndexTicket(ctx context.Context, ticket *pb.Ticket) error {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return status.Errorf(codes.Unavailable, "IndexTicket, id: %s, failed to connect to redis: %v", ticket.GetId(), err)
	}
	defer handleConnectionClose(&redisConn)

	// NOTE(review): Send only buffers the command on the connection; it is
	// presumably flushed when the pooled connection is returned by
	// handleConnectionClose — confirm against redigo pool Close semantics.
	err = redisConn.Send("SADD", allTickets, ticket.Id)
	if err != nil {
		err = errors.Wrapf(err, "failed to add ticket to all tickets, id: %s", ticket.Id)
		return status.Errorf(codes.Internal, "%v", err)
	}

	return nil
}
|
||||
|
||||
// DeindexTicket removes the indexing for the specified Ticket. Only the indexes are removed but the Ticket continues to exist.
// Deindexing an id that was never indexed is not an error (SREM is a no-op).
func (rb *redisBackend) DeindexTicket(ctx context.Context, id string) error {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return status.Errorf(codes.Unavailable, "DeindexTicket, id: %s, failed to connect to redis: %v", id, err)
	}
	defer handleConnectionClose(&redisConn)

	// NOTE(review): Send only buffers the command on the connection; it is
	// presumably flushed when the pooled connection is returned by
	// handleConnectionClose — confirm against redigo pool Close semantics.
	err = redisConn.Send("SREM", allTickets, id)
	if err != nil {
		err = errors.Wrapf(err, "failed to remove ticket from all tickets, id: %s", id)
		return status.Errorf(codes.Internal, "%v", err)
	}

	return nil
}
|
||||
|
||||
// GetIndexedIDSet returns the ids of all tickets currently indexed, excluding
// tickets sitting inside the pending-release window.
func (rb *redisBackend) GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error) {
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Unavailable, "GetIndexedIDSet, failed to connect to redis: %v", err)
	}
	defer handleConnectionClose(&redisConn)

	// A ticket counts as pending if its proposal timestamp lies within
	// [now - pendingReleaseTimeout, now + 1h]. Entries older than the ttl are
	// treated as expired and become visible again.
	ttl := rb.cfg.GetDuration("pendingReleaseTimeout")
	curTime := time.Now()
	// The +1h upper bound presumably tolerates clock skew between writers —
	// TODO confirm.
	endTimeInt := curTime.Add(time.Hour).UnixNano()
	startTimeInt := curTime.Add(-ttl).UnixNano()

	// Filter out tickets that are fetched but not assigned within ttl time (ms).
	idsInPendingReleases, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", proposedTicketIDs, startTimeInt, endTimeInt))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "error getting pending release %v", err)
	}

	idsIndexed, err := redis.Strings(redisConn.Do("SMEMBERS", allTickets))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "error getting all indexed ticket ids %v", err)
	}

	// Result = indexed set minus pending-release set.
	r := make(map[string]struct{}, len(idsIndexed))
	for _, id := range idsIndexed {
		r[id] = struct{}{}
	}
	for _, id := range idsInPendingReleases {
		delete(r, id)
	}

	return r, nil
}
|
||||
|
||||
// GetTickets returns multiple tickets from storage. Missing tickets are
|
||||
// silently ignored.
|
||||
func (rb *redisBackend) GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error) {
|
||||
if len(ids) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Unavailable, "GetTickets, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
queryParams := make([]interface{}, len(ids))
|
||||
for i, id := range ids {
|
||||
queryParams[i] = id
|
||||
}
|
||||
|
||||
ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", queryParams...))
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to lookup tickets %v", ids)
|
||||
return nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
r := make([]*pb.Ticket, 0, len(ids))
|
||||
|
||||
for i, b := range ticketBytes {
|
||||
// Tickets may be deleted by the time we read it from redis.
|
||||
if b != nil {
|
||||
t := &pb.Ticket{}
|
||||
err = proto.Unmarshal(b, t)
|
||||
if err != nil {
|
||||
err = errors.Wrapf(err, "failed to unmarshal ticket from redis, key %s", ids[i])
|
||||
return nil, status.Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
r = append(r, t)
|
||||
}
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// UpdateAssignments update using the request's specified tickets with assignments.
//
// For each AssignmentGroup it writes the assignment into every referenced
// ticket via a single MULTI/EXEC batch, using SET ... PX <assignedDeleteTimeout>
// XX so the assigned ticket gets an expiry and is only written if it still
// exists. Tickets missing at read or write time are reported as
// TICKET_NOT_FOUND failures in the response rather than as errors. Returns
// the response, the tickets successfully assigned, and an error.
func (rb *redisBackend) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, []*pb.Ticket, error) {
	resp := &pb.AssignTicketsResponse{}
	if len(req.Assignments) == 0 {
		return resp, []*pb.Ticket{}, nil
	}

	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return nil, nil, status.Errorf(codes.Unavailable, "UpdateAssignments, failed to connect to redis: %v", err)
	}
	defer handleConnectionClose(&redisConn)

	// Flatten the assignment groups into id -> assignment, rejecting
	// duplicates, and collect the ids both as strings (for messages) and as
	// interface{} (for the variadic MGET call).
	idToA := make(map[string]*pb.Assignment)
	ids := make([]string, 0)
	idsI := make([]interface{}, 0)
	for _, a := range req.Assignments {
		if a.Assignment == nil {
			return nil, nil, status.Error(codes.InvalidArgument, "AssignmentGroup.Assignment is required")
		}

		for _, id := range a.TicketIds {
			if _, ok := idToA[id]; ok {
				return nil, nil, status.Errorf(codes.InvalidArgument, "Ticket id %s is assigned multiple times in one assign tickets call", id)
			}

			idToA[id] = a.Assignment
			ids = append(ids, id)
			idsI = append(idsI, id)
		}
	}

	if len(idsI) == 0 {
		return nil, nil, status.Error(codes.InvalidArgument, "AssignmentGroupTicketIds is empty")
	}

	// Read all targeted tickets in one round trip.
	ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", idsI...))
	if err != nil {
		return nil, nil, err
	}

	tickets := make([]*pb.Ticket, 0, len(ticketBytes))
	for i, ticketByte := range ticketBytes {
		// Tickets may be deleted by the time we read it from redis.
		if ticketByte == nil {
			resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
				TicketId: ids[i],
				Cause:    pb.AssignmentFailure_TICKET_NOT_FOUND,
			})
		} else {
			t := &pb.Ticket{}
			err = proto.Unmarshal(ticketByte, t)
			if err != nil {
				err = errors.Wrapf(err, "failed to unmarshal ticket from redis %s", ids[i])
				return nil, nil, status.Errorf(codes.Internal, "%v", err)
			}
			tickets = append(tickets, t)
		}
	}
	// PX expects milliseconds, hence the division.
	assignmentTimeout := rb.cfg.GetDuration("assignedDeleteTimeout") / time.Millisecond
	err = redisConn.Send("MULTI")
	if err != nil {
		return nil, nil, errors.Wrap(err, "error starting redis multi")
	}

	// Queue one SET per surviving ticket inside the transaction. "XX" makes
	// the write conditional on the key still existing.
	for _, ticket := range tickets {
		ticket.Assignment = idToA[ticket.Id]

		var ticketByte []byte
		ticketByte, err = proto.Marshal(ticket)
		if err != nil {
			return nil, nil, status.Errorf(codes.Internal, "failed to marshal ticket %s", ticket.GetId())
		}

		err = redisConn.Send("SET", ticket.Id, ticketByte, "PX", int64(assignmentTimeout), "XX")
		if err != nil {
			return nil, nil, errors.Wrap(err, "error sending ticket assignment set")
		}
	}

	// EXEC returns one reply per queued SET, in order.
	wasSet, err := redis.Values(redisConn.Do("EXEC"))
	if err != nil {
		return nil, nil, errors.Wrap(err, "error executing assignment set")
	}

	if len(wasSet) != len(tickets) {
		return nil, nil, status.Errorf(codes.Internal, "sent %d tickets to redis, but received %d back", len(tickets), len(wasSet))
	}

	// Partition the per-ticket results: nil reply means the XX condition
	// failed (ticket deleted mid-flight), "OK" means assigned.
	assignedTickets := make([]*pb.Ticket, 0, len(tickets))
	for i, ticket := range tickets {
		v, err := redis.String(wasSet[i], nil)
		if err == redis.ErrNil {
			resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
				TicketId: ticket.Id,
				Cause:    pb.AssignmentFailure_TICKET_NOT_FOUND,
			})
			continue
		}
		if err != nil {
			return nil, nil, errors.Wrap(err, "unexpected error from redis multi set")
		}
		if v != "OK" {
			return nil, nil, status.Errorf(codes.Internal, "unexpected response from redis: %s", v)
		}
		assignedTickets = append(assignedTickets, ticket)
	}

	return resp, assignedTickets, nil
}
|
||||
|
||||
// GetAssignments returns the assignment associated with the input ticket id.
// It polls the ticket on a constant-interval backoff, invoking callback with
// the latest assignment on every tick. The loop only terminates when
// GetTicket fails (including context cancellation surfacing through the
// pool) or the callback returns an error — both are wrapped in
// backoff.Permanent to stop the retries.
func (rb *redisBackend) GetAssignments(ctx context.Context, id string, callback func(*pb.Assignment) error) error {
	// This connection is only used as an up-front connectivity check: it is
	// never used below (GetTicket acquires its own), yet it is held until
	// the polling loop returns.
	redisConn, err := rb.redisPool.GetContext(ctx)
	if err != nil {
		return status.Errorf(codes.Unavailable, "GetAssignments, id: %s, failed to connect to redis: %v", id, err)
	}
	defer handleConnectionClose(&redisConn)

	backoffOperation := func() error {
		var ticket *pb.Ticket
		ticket, err = rb.GetTicket(ctx, id)
		if err != nil {
			return backoff.Permanent(err)
		}

		err = callback(ticket.GetAssignment())
		if err != nil {
			return backoff.Permanent(err)
		}

		// A non-permanent error keeps the retry loop going.
		return status.Error(codes.Unavailable, "listening on assignment updates, waiting for the next backoff")
	}

	err = backoff.Retry(backoffOperation, rb.newConstantBackoffStrategy())
	if err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// AddTicketsToPendingRelease appends new proposed tickets to the proposed sorted set with current timestamp
|
||||
func (rb *redisBackend) AddTicketsToPendingRelease(ctx context.Context, ids []string) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "AddTicketsToPendingRelease, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
currentTime := time.Now().UnixNano()
|
||||
cmds := make([]interface{}, 0, 2*len(ids)+1)
|
||||
cmds = append(cmds, proposedTicketIDs)
|
||||
for _, id := range ids {
|
||||
cmds = append(cmds, currentTime, id)
|
||||
}
|
||||
|
||||
_, err = redisConn.Do("ZADD", cmds...)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to append proposed tickets to pending release")
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTicketsFromPendingRelease deletes tickets from the proposed sorted set
|
||||
func (rb *redisBackend) DeleteTicketsFromPendingRelease(ctx context.Context, ids []string) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "DeleteTicketsFromPendingRelease, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
cmds := make([]interface{}, 0, len(ids)+1)
|
||||
cmds = append(cmds, proposedTicketIDs)
|
||||
for _, id := range ids {
|
||||
cmds = append(cmds, id)
|
||||
}
|
||||
|
||||
_, err = redisConn.Do("ZREM", cmds...)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "failed to delete proposed tickets from pending release")
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rb *redisBackend) ReleaseAllTickets(ctx context.Context) error {
|
||||
redisConn, err := rb.redisPool.GetContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Unavailable, "ReleaseAllTickets, failed to connect to redis: %v", err)
|
||||
}
|
||||
defer handleConnectionClose(&redisConn)
|
||||
|
||||
_, err = redisConn.Do("DEL", proposedTicketIDs)
|
||||
return err
|
||||
}
|
||||
|
||||
func (rb *redisBackend) newConstantBackoffStrategy() backoff.BackOff {
|
||||
backoffStrat := backoff.NewConstantBackOff(rb.cfg.GetDuration("backoff.initialInterval"))
|
||||
return backoff.BackOff(backoffStrat)
|
||||
}
|
||||
|
||||
// TODO: add cache the backoff object
|
||||
// nolint: unused
|
||||
func (rb *redisBackend) newExponentialBackoffStrategy() backoff.BackOff {
|
||||
backoffStrat := backoff.NewExponentialBackOff()
|
||||
backoffStrat.InitialInterval = rb.cfg.GetDuration("backoff.initialInterval")
|
||||
backoffStrat.RandomizationFactor = rb.cfg.GetFloat64("backoff.randFactor")
|
||||
backoffStrat.Multiplier = rb.cfg.GetFloat64("backoff.multiplier")
|
||||
backoffStrat.MaxInterval = rb.cfg.GetDuration("backoff.maxInterval")
|
||||
backoffStrat.MaxElapsedTime = rb.cfg.GetDuration("backoff.maxElapsedTime")
|
||||
return backoff.BackOff(backoffStrat)
|
||||
}
|
994
internal/statestore/ticket_test.go
Normal file
994
internal/statestore/ticket_test.go
Normal file
@ -0,0 +1,994 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package statestore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Bose/minisentinel"
|
||||
miniredis "github.com/alicebob/miniredis/v2"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
"github.com/rs/xid"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
utilTesting "open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
func TestStatestoreSetup(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
}
|
||||
|
||||
// TestTicketLifecycle walks a ticket through its full CRUD lifecycle:
// missing-ticket errors, creation, field-preserving retrieval, and deletion.
func TestTicketLifecycle(t *testing.T) {
	cfg, closer := createRedis(t, true, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()

	ctx := utilTesting.NewContext(t)

	// Initialize test data
	id := xid.New().String()
	ticket := &pb.Ticket{
		Id: id,
		SearchFields: &pb.SearchFields{
			DoubleArgs: map[string]float64{
				"testindex1": 42,
			},
		},
		Assignment: &pb.Assignment{
			Connection: "test-tbd",
		},
	}

	// Validate that GetTicket fails for a Ticket that does not exist.
	_, err := service.GetTicket(ctx, id)
	require.NotNil(t, err)
	require.Equal(t, status.Code(err), codes.NotFound)

	// Validate nonexisting Ticket deletion (also NotFound)
	err = service.DeleteTicket(ctx, id)
	require.NotNil(t, err)
	require.Equal(t, status.Code(err), codes.NotFound)

	// Validate nonexisting Ticket deindexing — unlike delete, this is a
	// no-op rather than an error.
	err = service.DeindexTicket(ctx, id)
	require.Nil(t, err)

	// Validate Ticket creation
	err = service.CreateTicket(ctx, ticket)
	require.Nil(t, err)

	// Validate Ticket retrieval round-trips every stored field.
	result, err := service.GetTicket(ctx, ticket.Id)
	require.NoError(t, err)
	require.NotNil(t, result)
	require.Equal(t, ticket.Id, result.Id)
	require.Equal(t, ticket.SearchFields.DoubleArgs["testindex1"], result.SearchFields.DoubleArgs["testindex1"])
	require.NotNil(t, result.Assignment)
	require.Equal(t, ticket.Assignment.Connection, result.Assignment.Connection)

	// Validate Ticket deletion
	err = service.DeleteTicket(ctx, id)
	require.Nil(t, err)

	// After deletion the ticket must be gone again.
	_, err = service.GetTicket(ctx, id)
	require.NotNil(t, err)
}
|
||||
|
||||
func TestGetAssignmentBeforeSet(t *testing.T) {
|
||||
cfg, closer := createRedis(t, true, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
var assignmentResp *pb.Assignment
|
||||
|
||||
err := service.GetAssignments(ctx, "id", func(assignment *pb.Assignment) error {
|
||||
assignmentResp = assignment
|
||||
return nil
|
||||
})
|
||||
// GetAssignment failed because the ticket does not exists
|
||||
require.Equal(t, status.Convert(err).Code(), codes.NotFound)
|
||||
require.Nil(t, assignmentResp)
|
||||
}
|
||||
|
||||
// TestGetAssignmentNormal verifies the polling behavior of GetAssignments:
// the callback repeatedly receives the stored assignment until it returns an
// error, which is then surfaced unchanged to the caller; a canceled context
// yields codes.Unavailable.
func TestGetAssignmentNormal(t *testing.T) {
	cfg, closer := createRedis(t, true, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	err := service.CreateTicket(ctx, &pb.Ticket{
		Id:         "1",
		Assignment: &pb.Assignment{Connection: "2"},
	})
	require.Nil(t, err)

	var assignmentResp *pb.Assignment
	ctx, cancel := context.WithCancel(ctx)
	callbackCount := 0
	returnedErr := errors.New("some errors")

	err = service.GetAssignments(ctx, "1", func(assignment *pb.Assignment) error {
		assignmentResp = assignment

		// On the invocation where the counter has reached 5, stop the loop
		// by returning an error; the counter is NOT incremented on that
		// final call, so it stays at 5.
		if callbackCount == 5 {
			cancel()
			return returnedErr
		} else if callbackCount > 0 {
			// Test the assignment returned was successfully passed in to the callback function
			require.Equal(t, assignmentResp.Connection, "2")
		}

		callbackCount++
		return nil
	})

	// Test GetAssignments was retried for 5 times and returned with expected error
	require.Equal(t, 5, callbackCount)
	require.Equal(t, returnedErr, err)

	// Pass an expired context, err expected
	ctx, cancel = context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err = service.GetAssignments(ctx, "1", func(assignment *pb.Assignment) error { return nil })
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "GetAssignments, id: 1, failed to connect to redis:")
}
|
||||
|
||||
// TestUpdateAssignments is a table-driven test for UpdateAssignments covering:
// empty requests, successful assignment, validation failures (empty ticket id
// list, nil assignment, duplicate ticket ids), missing tickets (reported as
// response failures rather than errors), corrupt redis values, and finally an
// already-cancelled context (codes.Unavailable).
func TestUpdateAssignments(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	err := service.CreateTicket(ctx, &pb.Ticket{
		Id:         "1",
		Assignment: &pb.Assignment{Connection: "2"},
	})
	require.Nil(t, err)

	// Plant a non-protobuf value in redis so the "wrong value" case below can
	// exercise the unmarshal-failure path.
	c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
	require.NoError(t, err)
	_, err = c.Do("SET", "wrong-type-key", "wrong-type-value")
	require.NoError(t, err)

	// expected captures everything a test case asserts on: the RPC response,
	// the status code/message of the returned error, and which tickets were
	// actually assigned.
	type expected struct {
		resp               *pb.AssignTicketsResponse
		errCode            codes.Code
		errMessage         string
		assignedTicketsIDs []string
	}

	var testCases = []struct {
		description string
		request     *pb.AssignTicketsRequest
		expected
	}{
		{
			description: "no assignments, empty response is returned",
			request:     &pb.AssignTicketsRequest{},
			expected: expected{
				resp:               &pb.AssignTicketsResponse{},
				errCode:            codes.OK,
				errMessage:         "",
				assignedTicketsIDs: []string{},
			},
		},
		{
			description: "updated assignments, no errors",
			request: &pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{"1"},
						Assignment: &pb.Assignment{Connection: "2"},
					},
				},
			},
			expected: expected{
				resp:               &pb.AssignTicketsResponse{},
				errCode:            codes.OK,
				errMessage:         "",
				assignedTicketsIDs: []string{"1"},
			},
		},
		{
			description: "empty ticketIds, error expected",
			request: &pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{},
						Assignment: &pb.Assignment{Connection: "2"},
					},
				},
			},
			expected: expected{
				resp:               nil,
				errCode:            codes.InvalidArgument,
				errMessage:         "AssignmentGroupTicketIds is empty",
				assignedTicketsIDs: nil,
			},
		},
		{
			description: "nil assignment, error expected",
			request: &pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{"1"},
						Assignment: nil,
					},
				},
			},
			expected: expected{
				resp:               nil,
				errCode:            codes.InvalidArgument,
				errMessage:         "AssignmentGroup.Assignment is required",
				assignedTicketsIDs: []string{},
			},
		},
		{
			description: "ticket is assigned multiple times, error expected",
			request: &pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{"1"},
						Assignment: &pb.Assignment{Connection: "2"},
					},
					{
						TicketIds:  []string{"1"},
						Assignment: &pb.Assignment{Connection: "2"},
					},
				},
			},
			expected: expected{
				resp:               nil,
				errCode:            codes.InvalidArgument,
				errMessage:         "Ticket id 1 is assigned multiple times in one assign tickets call",
				assignedTicketsIDs: []string{},
			},
		},
		{
			description: "ticket doesn't exist, no error, response failure expected",
			request: &pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{"11111"},
						Assignment: &pb.Assignment{Connection: "2"},
					},
				},
			},
			expected: expected{
				resp: &pb.AssignTicketsResponse{
					Failures: []*pb.AssignmentFailure{{
						TicketId: "11111",
						Cause:    pb.AssignmentFailure_TICKET_NOT_FOUND,
					}},
				},
				errCode:            codes.OK,
				errMessage:         "",
				assignedTicketsIDs: []string{},
			},
		},
		{
			description: "wrong value, error expected",
			request: &pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{"wrong-type-key"},
						Assignment: &pb.Assignment{Connection: "2"},
					},
				},
			},
			expected: expected{
				resp:               nil,
				errCode:            codes.Internal,
				errMessage:         "failed to unmarshal ticket from redis wrong-type-key",
				assignedTicketsIDs: []string{},
			},
		},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.description, func(t *testing.T) {
			resp, ticketsAssignedActual, errActual := service.UpdateAssignments(ctx, tc.request)
			if tc.expected.errCode != codes.OK {
				require.Error(t, errActual)
				require.Equal(t, tc.expected.errCode.String(), status.Convert(errActual).Code().String())
				require.Contains(t, status.Convert(errActual).Message(), tc.expected.errMessage)
			} else {
				require.NoError(t, errActual)
				require.Equal(t, tc.expected.resp, resp)
				require.Equal(t, len(tc.expected.assignedTicketsIDs), len(ticketsAssignedActual))

				// Assigned tickets may come back in any order; check set
				// membership rather than positions.
				for _, ticket := range ticketsAssignedActual {
					found := false
					for _, id := range tc.expected.assignedTicketsIDs {
						if ticket.GetId() == id {
							found = true
							break
						}
					}
					require.Truef(t, found, "assigned ticket ID %s is not found in an expected slice", ticket.GetId())
				}
			}
		})
	}

	// Pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	_, _, err = service.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{
			{
				TicketIds:  []string{"11111"},
				Assignment: &pb.Assignment{Connection: "2"},
			},
		},
	})
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "UpdateAssignments, failed to connect to redis: context canceled")
}
|
||||
|
||||
// TestConnect exercises the redis connection setup across all four
// combinations of sentinel on/off and password on/off.
func TestConnect(t *testing.T) {
	testConnect(t, false, "")
	testConnect(t, false, "redispassword")
	testConnect(t, true, "")
	testConnect(t, true, "redispassword")
}
|
||||
|
||||
// TestHealthCheck verifies that HealthCheck succeeds against a live redis and
// returns codes.Unavailable once the backing redis has been shut down.
func TestHealthCheck(t *testing.T) {
	cfg, closer := createRedis(t, true, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()

	// OK
	ctx := utilTesting.NewContext(t)
	err := service.HealthCheck(ctx)
	require.NoError(t, err)

	// Error expected: shut redis down, then health check again.
	closer()
	err = service.HealthCheck(ctx)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
}
|
||||
|
||||
// TestCreateTicket is a table-driven test for CreateTicket: a valid ticket
// succeeds, a nil ticket fails to marshal (codes.Internal), and an
// already-cancelled context yields codes.Unavailable.
func TestCreateTicket(t *testing.T) {
	cfg, closer := createRedis(t, true, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	var testCases = []struct {
		description     string
		ticket          *pb.Ticket
		expectedCode    codes.Code
		expectedMessage string
	}{
		{
			description: "ok",
			ticket: &pb.Ticket{
				Id:         "1",
				Assignment: &pb.Assignment{Connection: "2"},
			},
			expectedCode:    codes.OK,
			expectedMessage: "",
		},
		{
			description:     "nil ticket passed, err expected",
			ticket:          nil,
			expectedCode:    codes.Internal,
			expectedMessage: "failed to marshal the ticket proto, id: : proto: Marshal called with nil",
		},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.description, func(t *testing.T) {
			err := service.CreateTicket(ctx, tc.ticket)
			if tc.expectedCode == codes.OK {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
				require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
			}
		})
	}

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err := service.CreateTicket(ctx, &pb.Ticket{
		Id:         "222",
		Assignment: &pb.Assignment{Connection: "2"},
	})
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "CreateTicket, id: 222, failed to connect to redis:")
}
|
||||
|
||||
// TestGetTicket is a table-driven test for GetTicket: a stored ticket is
// found, empty/unknown ids are NotFound, a non-protobuf value stored under
// the key fails to unmarshal (codes.Internal), and an already-cancelled
// context yields codes.Unavailable.
func TestGetTicket(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()

	ctx := utilTesting.NewContext(t)

	err := service.CreateTicket(ctx, &pb.Ticket{
		Id:         "mockTicketID",
		Assignment: &pb.Assignment{Connection: "2"},
	})
	require.NoError(t, err)

	// Plant a non-protobuf value in redis for the wrong-type case below.
	c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
	require.NoError(t, err)
	_, err = c.Do("SET", "wrong-type-key", "wrong-type-value")
	require.NoError(t, err)

	var testCases = []struct {
		description     string
		ticketID        string
		expectedCode    codes.Code
		expectedMessage string
	}{
		{
			description:     "ticket is found",
			ticketID:        "mockTicketID",
			expectedCode:    codes.OK,
			expectedMessage: "",
		},
		{
			description:     "empty id passed, err expected",
			ticketID:        "",
			expectedCode:    codes.NotFound,
			expectedMessage: "Ticket id: not found",
		},
		{
			description:     "wrong id passed, err expected",
			ticketID:        "123456",
			expectedCode:    codes.NotFound,
			expectedMessage: "Ticket id: 123456 not found",
		},
		{
			description:     "item of a wrong type is requested, err expected",
			ticketID:        "wrong-type-key",
			expectedCode:    codes.Internal,
			expectedMessage: "failed to unmarshal the ticket proto, id: wrong-type-key: proto:",
		},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.description, func(t *testing.T) {
			ticketActual, errActual := service.GetTicket(ctx, tc.ticketID)
			if tc.expectedCode == codes.OK {
				require.NoError(t, errActual)
				require.NotNil(t, ticketActual)
			} else {
				require.Error(t, errActual)
				require.Equal(t, tc.expectedCode.String(), status.Convert(errActual).Code().String())
				require.Contains(t, status.Convert(errActual).Message(), tc.expectedMessage)
			}
		})
	}

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	res, err := service.GetTicket(ctx, "12345")
	require.Error(t, err)
	require.Nil(t, res)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "GetTicket, id: 12345, failed to connect to redis:")
}
|
||||
|
||||
// TestDeleteTicket is a table-driven test for DeleteTicket: deleting a stored
// ticket succeeds (and a follow-up GetTicket returns NotFound), empty/unknown
// ids are NotFound, and an already-cancelled context yields codes.Unavailable.
func TestDeleteTicket(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()

	ctx := utilTesting.NewContext(t)

	err := service.CreateTicket(ctx, &pb.Ticket{
		Id:         "mockTicketID",
		Assignment: &pb.Assignment{Connection: "2"},
	})
	require.NoError(t, err)

	var testCases = []struct {
		description     string
		ticketID        string
		expectedCode    codes.Code
		expectedMessage string
	}{
		{
			description:     "ticket is found and deleted",
			ticketID:        "mockTicketID",
			expectedCode:    codes.OK,
			expectedMessage: "",
		},
		{
			description:     "empty id passed, err expected",
			ticketID:        "",
			expectedCode:    codes.NotFound,
			expectedMessage: "Ticket id: not found",
		},
		{
			description:     "wrong id passed, err expected",
			ticketID:        "123456",
			expectedCode:    codes.NotFound,
			expectedMessage: "Ticket id: 123456 not found",
		},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.description, func(t *testing.T) {
			errActual := service.DeleteTicket(ctx, tc.ticketID)
			if tc.expectedCode == codes.OK {
				require.NoError(t, errActual)

				// Deleted ticket must no longer be retrievable.
				_, errGetTicket := service.GetTicket(ctx, tc.ticketID)
				require.Error(t, errGetTicket)
				require.Equal(t, codes.NotFound.String(), status.Convert(errGetTicket).Code().String())
			} else {
				require.Error(t, errActual)
				require.Equal(t, tc.expectedCode.String(), status.Convert(errActual).Code().String())
				require.Contains(t, status.Convert(errActual).Message(), tc.expectedMessage)
			}
		})
	}

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err = service.DeleteTicket(ctx, "12345")
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "DeleteTicket, id: 12345, failed to connect to redis:")
}
|
||||
|
||||
// TestIndexTicket verifies that IndexTicket adds ticket ids to the allTickets
// set in redis, and that an already-cancelled context yields codes.Unavailable.
func TestIndexTicket(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()

	ctx := utilTesting.NewContext(t)

	generateTickets(ctx, t, service, 2)

	// Inspect the raw redis set to confirm both ids were indexed.
	c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
	require.NoError(t, err)
	idsIndexed, err := redis.Strings(c.Do("SMEMBERS", allTickets))
	require.NoError(t, err)
	require.Len(t, idsIndexed, 2)
	require.Equal(t, "mockTicketID-0", idsIndexed[0])
	require.Equal(t, "mockTicketID-1", idsIndexed[1])

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err = service.IndexTicket(ctx, &pb.Ticket{
		Id:         "12345",
		Assignment: &pb.Assignment{Connection: "2"},
	})
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "IndexTicket, id: 12345, failed to connect to redis:")
}
|
||||
|
||||
func TestDeindexTicket(t *testing.T) {
|
||||
cfg, closer := createRedis(t, false, "")
|
||||
defer closer()
|
||||
service := New(cfg)
|
||||
require.NotNil(t, service)
|
||||
defer service.Close()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
generateTickets(ctx, t, service, 2)
|
||||
|
||||
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err := redis.Strings(c.Do("SMEMBERS", "allTickets"))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 2)
|
||||
require.Equal(t, "mockTicketID-0", idsIndexed[0])
|
||||
require.Equal(t, "mockTicketID-1", idsIndexed[1])
|
||||
|
||||
// deindex and check that there is only 1 ticket in the returned slice
|
||||
err = service.DeindexTicket(ctx, "mockTicketID-1")
|
||||
require.NoError(t, err)
|
||||
idsIndexed, err = redis.Strings(c.Do("SMEMBERS", "allTickets"))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, idsIndexed, 1)
|
||||
require.Equal(t, "mockTicketID-0", idsIndexed[0])
|
||||
|
||||
// pass an expired context, err expected
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
service = New(cfg)
|
||||
err = service.DeindexTicket(ctx, "12345")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
|
||||
require.Contains(t, status.Convert(err).Message(), "DeindexTicket, id: 12345, failed to connect to redis:")
|
||||
}
|
||||
|
||||
// TestGetIndexedIDSet verifies that GetIndexedIDSet returns the set of
// indexed ticket ids, excludes tickets in the pending-release zset until
// their timeout elapses, and returns codes.Unavailable on a cancelled
// context.
func TestGetIndexedIDSet(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	tickets, _ := generateTickets(ctx, t, service, 2)

	// verifyTickets asserts the indexed id set matches exactly the given
	// tickets.
	verifyTickets := func(service Service, tickets []*pb.Ticket) {
		ids, err := service.GetIndexedIDSet(ctx)
		require.Nil(t, err)
		require.Equal(t, len(tickets), len(ids))

		for _, tt := range tickets {
			_, ok := ids[tt.GetId()]
			require.True(t, ok)
		}
	}

	// Verify all tickets are created and returned
	verifyTickets(service, tickets)

	c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
	require.NoError(t, err)
	// Add the first ticket to the pending release and verify changes are reflected in the result
	redis.Strings(c.Do("ZADD", "proposed_ticket_ids", time.Now().UnixNano(), "mockTicketID-0"))

	verifyTickets(service, tickets[1:2])

	// Sleep until the pending release expired and verify we still have all the tickets
	time.Sleep(cfg.GetDuration("pendingReleaseTimeout"))
	verifyTickets(service, tickets)

	// Pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	_, err = service.GetIndexedIDSet(ctx)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "GetIndexedIDSet, failed to connect to redis:")
}
|
||||
|
||||
// TestGetTickets verifies batch retrieval by id: stored tickets come back in
// order, an empty id slice returns nil without error, and a cancelled
// context yields codes.Unavailable.
func TestGetTickets(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	tickets, ids := generateTickets(ctx, t, service, 2)

	res, err := service.GetTickets(ctx, ids)
	require.NoError(t, err)

	for i, tc := range tickets {
		require.Equal(t, tc.GetId(), res[i].GetId())
	}

	// pass empty ids slice
	empty := []string{}
	res, err = service.GetTickets(ctx, empty)
	require.NoError(t, err)
	require.Nil(t, res)

	// pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	_, err = service.GetTickets(ctx, ids)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "GetTickets, failed to connect to redis:")
}
|
||||
|
||||
// TestDeleteTicketsFromPendingRelease verifies that removing ids from the
// pending-release zset restores them to the indexed set, that an empty slice
// is a no-op, and that a cancelled context yields codes.Unavailable.
func TestDeleteTicketsFromPendingRelease(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	tickets, ids := generateTickets(ctx, t, service, 2)

	// verifyTickets asserts the indexed id set matches exactly the given
	// tickets.
	verifyTickets := func(service Service, tickets []*pb.Ticket) {
		ids, err := service.GetIndexedIDSet(ctx)
		require.Nil(t, err)
		require.Equal(t, len(tickets), len(ids))

		for _, tt := range tickets {
			_, ok := ids[tt.GetId()]
			require.True(t, ok)
		}
	}

	// Verify all tickets are created and returned
	verifyTickets(service, tickets)

	c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
	require.NoError(t, err)
	// Add the first ticket to the pending release and verify changes are reflected in the result
	redis.Strings(c.Do("ZADD", "proposed_ticket_ids", time.Now().UnixNano(), ids[0]))

	// Verify 1 ticket is indexed
	verifyTickets(service, tickets[1:2])

	require.NoError(t, service.DeleteTicketsFromPendingRelease(ctx, ids[:1]))

	// Verify that ticket is deleted from indexed set
	verifyTickets(service, tickets)

	// Pass an empty ids slice
	empty := []string{}
	require.NoError(t, service.DeleteTicketsFromPendingRelease(ctx, empty))

	// Pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err = service.DeleteTicketsFromPendingRelease(ctx, ids)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "DeleteTicketsFromPendingRelease, failed to connect to redis:")
}
|
||||
|
||||
// TestReleaseAllTickets verifies that ReleaseAllTickets clears the
// pending-release zset so every ticket becomes indexed again, and that a
// cancelled context yields codes.Unavailable.
func TestReleaseAllTickets(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	tickets, ids := generateTickets(ctx, t, service, 2)

	// verifyTickets asserts the indexed id set matches exactly the given
	// tickets.
	verifyTickets := func(service Service, tickets []*pb.Ticket) {
		ids, err := service.GetIndexedIDSet(ctx)
		require.Nil(t, err)
		require.Equal(t, len(tickets), len(ids))

		for _, tt := range tickets {
			_, ok := ids[tt.GetId()]
			require.True(t, ok)
		}
	}

	// Verify all tickets are created and returned
	verifyTickets(service, tickets)

	c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
	require.NoError(t, err)
	// Add the first ticket to the pending release and verify changes are reflected in the result
	redis.Strings(c.Do("ZADD", "proposed_ticket_ids", time.Now().UnixNano(), ids[0]))

	// Verify 1 ticket is indexed
	verifyTickets(service, tickets[1:2])

	require.NoError(t, service.ReleaseAllTickets(ctx))

	// Verify that ticket is deleted from indexed set
	verifyTickets(service, tickets)

	// Pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err = service.ReleaseAllTickets(ctx)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "ReleaseAllTickets, failed to connect to redis:")
}
|
||||
|
||||
// TestAddTicketsToPendingRelease verifies that adding ids to the
// pending-release state removes them from the indexed set, that an empty
// slice is a no-op, and that a cancelled context yields codes.Unavailable.
func TestAddTicketsToPendingRelease(t *testing.T) {
	cfg, closer := createRedis(t, false, "")
	defer closer()
	service := New(cfg)
	require.NotNil(t, service)
	defer service.Close()
	ctx := utilTesting.NewContext(t)

	tickets, ids := generateTickets(ctx, t, service, 2)

	// verifyTickets asserts the indexed id set matches exactly the given
	// tickets.
	verifyTickets := func(service Service, tickets []*pb.Ticket) {
		ids, err := service.GetIndexedIDSet(ctx)
		require.Nil(t, err)
		require.Equal(t, len(tickets), len(ids))

		for _, tt := range tickets {
			_, ok := ids[tt.GetId()]
			require.True(t, ok)
		}
	}

	// Verify all tickets are created and returned
	verifyTickets(service, tickets)

	// Add 1st ticket to pending release state
	require.NoError(t, service.AddTicketsToPendingRelease(ctx, ids[:1]))

	// Verify 1 ticket is indexed
	verifyTickets(service, tickets[1:2])

	// Pass an empty ids slice
	empty := []string{}
	require.NoError(t, service.AddTicketsToPendingRelease(ctx, empty))

	// Pass an expired context, err expected
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	service = New(cfg)
	err := service.AddTicketsToPendingRelease(ctx, ids)
	require.Error(t, err)
	require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
	require.Contains(t, status.Convert(err).Message(), "AddTicketsToPendingRelease, failed to connect to redis:")
}
|
||||
|
||||
// testConnect spins up a redis per the given sentinel/password options,
// reaches into the instrumented service to grab the underlying pool, and
// confirms a PING round-trip succeeds.
func testConnect(t *testing.T, withSentinel bool, withPassword string) {
	cfg, closer := createRedis(t, withSentinel, withPassword)
	defer closer()
	store := New(cfg)
	defer store.Close()
	ctx := utilTesting.NewContext(t)

	// Unwrap instrumentedService -> redisBackend to access the raw pool.
	is, ok := store.(*instrumentedService)
	require.True(t, ok)
	rb, ok := is.s.(*redisBackend)
	require.True(t, ok)

	conn, err := rb.redisPool.GetContext(ctx)
	require.NoError(t, err)
	require.NotNil(t, conn)

	rply, err := redis.String(conn.Do("PING"))
	require.Nil(t, err)
	require.Equal(t, "PONG", rply)
}
|
||||
|
||||
// createRedis starts an in-process miniredis (optionally fronted by a
// minisentinel and/or protected by a password), returns a viper config view
// pointing at it, and a closer that tears everything down in order.
func createRedis(t *testing.T, withSentinel bool, withPassword string) (config.View, func()) {
	cfg := viper.New()
	closerFuncs := []func(){}
	mredis := miniredis.NewMiniRedis()
	err := mredis.StartAddr("localhost:0")
	if err != nil {
		t.Fatalf("failed to start miniredis, %v", err)
	}
	closerFuncs = append(closerFuncs, mredis.Close)

	// Pool / backoff / timeout settings kept small so tests stay fast.
	cfg.Set("redis.pool.maxIdle", 5)
	cfg.Set("redis.pool.idleTimeout", time.Second)
	cfg.Set("redis.pool.healthCheckTimeout", 100*time.Millisecond)
	cfg.Set("redis.pool.maxActive", 5)
	cfg.Set("backfillLockTimeout", "1m")
	cfg.Set("pendingReleaseTimeout", "200ms")
	cfg.Set("backoff.initialInterval", 100*time.Millisecond)
	cfg.Set("backoff.randFactor", 0.5)
	cfg.Set("backoff.multiplier", 0.5)
	cfg.Set("backoff.maxInterval", 300*time.Millisecond)
	cfg.Set("backoff.maxElapsedTime", 100*time.Millisecond)
	cfg.Set(telemetry.ConfigNameEnableMetrics, true)
	cfg.Set("assignedDeleteTimeout", 1000*time.Millisecond)

	if withSentinel {
		s := minisentinel.NewSentinel(mredis)
		err = s.StartAddr("localhost:0")
		if err != nil {
			t.Fatalf("failed to start minisentinel, %v", err)
		}

		closerFuncs = append(closerFuncs, s.Close)
		cfg.Set("redis.sentinelHostname", s.Host())
		cfg.Set("redis.sentinelPort", s.Port())
		cfg.Set("redis.sentinelMaster", s.MasterInfo().Name)
		cfg.Set("redis.sentinelEnabled", true)
		// TODO: enable sentinel auth test cases when the library support it.
		cfg.Set("redis.sentinelUsePassword", false)
	} else {
		cfg.Set("redis.hostname", mredis.Host())
		cfg.Set("redis.port", mredis.Port())
	}

	if len(withPassword) > 0 {
		mredis.RequireAuth(withPassword)
		// The service reads the password from a file, so write it to a temp
		// file and clean the file up in the closer.
		tmpFile, err := ioutil.TempFile("", "password")
		if err != nil {
			t.Fatal("failed to create temp file for password")
		}
		if _, err := tmpFile.WriteString(withPassword); err != nil {
			t.Fatal("failed to write pw to temp file")
		}

		closerFuncs = append(closerFuncs, func() { os.Remove(tmpFile.Name()) })
		cfg.Set("redis.usePassword", true)
		cfg.Set("redis.passwordPath", tmpFile.Name())
	}

	// Closer runs every registered teardown in registration order.
	return cfg, func() {
		for _, closer := range closerFuncs {
			closer()
		}
	}
}
|
||||
|
||||
//nolint: unparam
|
||||
// generateTickets creates a proper amount of ticket, returns a slice of tickets and a slice of tickets ids
|
||||
func generateTickets(ctx context.Context, t *testing.T, service Service, amount int) ([]*pb.Ticket, []string) {
|
||||
tickets := make([]*pb.Ticket, 0, amount)
|
||||
ids := make([]string, 0, amount)
|
||||
|
||||
for i := 0; i < amount; i++ {
|
||||
tmp := &pb.Ticket{
|
||||
Id: fmt.Sprintf("mockTicketID-%d", i),
|
||||
Assignment: &pb.Assignment{Connection: "2"},
|
||||
}
|
||||
require.NoError(t, service.CreateTicket(ctx, tmp))
|
||||
require.NoError(t, service.IndexTicket(ctx, tmp))
|
||||
tickets = append(tickets, tmp)
|
||||
ids = append(ids, tmp.GetId())
|
||||
}
|
||||
return tickets, ids
|
||||
}
|
680
internal/testing/e2e/backfill_test.go
Normal file
680
internal/testing/e2e/backfill_test.go
Normal file
@ -0,0 +1,680 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
func TestCreateGetBackfill(t *testing.T) {
|
||||
om := newOM(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bf := &pb.CreateBackfillRequest{Backfill: &pb.Backfill{SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
},
|
||||
}}}
|
||||
b1, err := om.Frontend().CreateBackfill(ctx, bf)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b1)
|
||||
|
||||
b2, err := om.Frontend().CreateBackfill(ctx, bf)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b2)
|
||||
|
||||
// Different IDs should be generated
|
||||
require.NotEqual(t, b1.Id, b2.Id)
|
||||
matched, err := regexp.MatchString(`[0-9a-v]{20}`, b1.GetId())
|
||||
require.True(t, matched)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bf.Backfill.SearchFields.DoubleArgs["test-arg"], b1.SearchFields.DoubleArgs["test-arg"])
|
||||
b1.Id = b2.Id
|
||||
b1.CreateTime = b2.CreateTime
|
||||
|
||||
// All fields other than CreateTime and Id fields should be equal
|
||||
require.Equal(t, b1, b2)
|
||||
require.NoError(t, err)
|
||||
actual, err := om.Frontend().GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: b1.Id})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, actual)
|
||||
require.Equal(t, b1, actual)
|
||||
|
||||
// Get Backfill which is not present - NotFound
|
||||
actual, err = om.Frontend().GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: b1.Id + "new"})
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, codes.NotFound.String(), status.Convert(err).Code().String())
|
||||
require.Nil(t, actual)
|
||||
}
|
||||
|
||||
// TestBackfillFrontendLifecycle Create, Get and Update Backfill test
|
||||
func TestBackfillFrontendLifecycle(t *testing.T) {
|
||||
om := newOM(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bf := &pb.Backfill{SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
},
|
||||
}}
|
||||
|
||||
createdBf, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: bf})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), createdBf.Generation)
|
||||
|
||||
createdBf.SearchFields.StringArgs["key"] = "val"
|
||||
|
||||
orig := &anypb.Any{Value: []byte("test")}
|
||||
val, err := ptypes.MarshalAny(orig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a different Backfill, but with the same ID
|
||||
// Pass different time
|
||||
bf2 := &pb.Backfill{
|
||||
CreateTime: ptypes.TimestampNow(),
|
||||
Id: createdBf.Id,
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"key": "val",
|
||||
"search": "me",
|
||||
},
|
||||
},
|
||||
Generation: 42,
|
||||
Extensions: map[string]*any.Any{"key": val},
|
||||
}
|
||||
updatedBf, err := om.Frontend().UpdateBackfill(ctx, &pb.UpdateBackfillRequest{Backfill: bf2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(2), updatedBf.Generation)
|
||||
|
||||
// No changes to CreateTime
|
||||
require.Equal(t, createdBf.CreateTime.GetNanos(), updatedBf.CreateTime.GetNanos())
|
||||
|
||||
get, err := om.Frontend().GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: createdBf.Id})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bf2.SearchFields.StringArgs, get.SearchFields.StringArgs)
|
||||
|
||||
unpacked := &anypb.Any{}
|
||||
err = ptypes.UnmarshalAny(get.Extensions["key"], unpacked)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, unpacked.Value, orig.Value)
|
||||
_, err = om.Frontend().DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: createdBf.Id})
|
||||
require.NoError(t, err)
|
||||
|
||||
get, err = om.Frontend().GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: createdBf.Id})
|
||||
require.Error(t, err, fmt.Sprintf("Backfill id: %s not found", bf.Id))
|
||||
require.Nil(t, get)
|
||||
}
|
||||
|
||||
// TestAcknowledgeBackfill checks that tickets got assigned
|
||||
// to the same Connection as it is provided in AcknowledgeBackfill request
|
||||
func TestAcknowledgeBackfill(t *testing.T) {
|
||||
om := newOM(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bf := &pb.Backfill{SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
},
|
||||
}}
|
||||
createdBf, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: bf})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), createdBf.Generation)
|
||||
|
||||
ticketIDs := createMatchWithBackfill(ctx, om, createdBf, t)
|
||||
|
||||
conn := "127.0.0.1:4242"
|
||||
getResp, err := om.Frontend().AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{
|
||||
BackfillId: createdBf.Id,
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: conn,
|
||||
Extensions: map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 10,
|
||||
}),
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NotNil(t, getResp)
|
||||
require.NotNil(t, getResp.Backfill)
|
||||
require.NotNil(t, getResp.Tickets)
|
||||
require.Equal(t, len(ticketIDs), len(getResp.Tickets))
|
||||
require.NoError(t, err)
|
||||
|
||||
respTicketIds := make([]string, len(getResp.Tickets))
|
||||
|
||||
for _, rt := range getResp.Tickets {
|
||||
respTicketIds = append(respTicketIds, rt.Id)
|
||||
}
|
||||
|
||||
for _, v := range ticketIDs {
|
||||
ticket, err := om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: v})
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, respTicketIds, ticket.Id)
|
||||
require.NotNil(t, ticket.Assignment)
|
||||
require.Equal(t, conn, ticket.Assignment.Connection)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAcknowledgeBackfillDeletedTicket checks that ticket deletion
|
||||
// does not block other tickets in backfill from being assigned
|
||||
func TestAcknowledgeBackfillDeletedTicket(t *testing.T) {
|
||||
om := newOM(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bf := &pb.Backfill{SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
},
|
||||
},
|
||||
}
|
||||
createdBf, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: bf})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), createdBf.Generation)
|
||||
|
||||
ticketIDs := createMatchWithBackfill(ctx, om, createdBf, t)
|
||||
|
||||
// Delete 1st ticket
|
||||
om.Frontend().DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticketIDs[0]})
|
||||
conn := "127.0.0.1:4242"
|
||||
getResp, err := om.Frontend().AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: createdBf.Id, Assignment: &pb.Assignment{Connection: conn, Extensions: map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 10,
|
||||
}),
|
||||
}}})
|
||||
require.NotNil(t, getResp)
|
||||
require.NotNil(t, getResp.Backfill)
|
||||
require.NotNil(t, getResp.Tickets)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that an error on 1st ticket assignment does not change 2nd ticket assignment
|
||||
ticket, err := om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: ticketIDs[1]})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, ticket.Assignment)
|
||||
require.Equal(t, conn, ticket.Assignment.Connection)
|
||||
}
|
||||
|
||||
func createMatchWithBackfill(ctx context.Context, om *om, b *pb.Backfill, t *testing.T) []string {
|
||||
t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{
|
||||
Ticket: &pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{
|
||||
Ticket: &pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
m := &pb.Match{
|
||||
MatchId: "1",
|
||||
Tickets: []*pb.Ticket{t1, t2},
|
||||
Backfill: b,
|
||||
}
|
||||
|
||||
om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
|
||||
out <- m
|
||||
return nil
|
||||
})
|
||||
|
||||
om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
p, ok := <-in
|
||||
require.True(t, ok)
|
||||
|
||||
out <- p.MatchId
|
||||
return nil
|
||||
})
|
||||
|
||||
stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
bfID := resp.Match.Backfill.Id
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
|
||||
actual, err := om.Frontend().GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: bfID})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, actual)
|
||||
|
||||
// Backfill Generation should be autoincremented if updated after MMF run
|
||||
// default Generation is 1 if not set (on Create)
|
||||
b.Generation++
|
||||
b.Id = actual.Id
|
||||
b.CreateTime = actual.CreateTime
|
||||
require.True(t, proto.Equal(b, actual))
|
||||
return []string{t1.Id, t2.Id}
|
||||
}
|
||||
|
||||
func TestProposedBackfillCreate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
om := newOM(t)
|
||||
|
||||
t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{
|
||||
Ticket: &pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
b := &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
}
|
||||
m := &pb.Match{
|
||||
MatchId: "1",
|
||||
Tickets: []*pb.Ticket{t1},
|
||||
Backfill: b,
|
||||
}
|
||||
|
||||
om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
|
||||
out <- m
|
||||
return nil
|
||||
})
|
||||
|
||||
om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
p, ok := <-in
|
||||
require.True(t, ok)
|
||||
|
||||
out <- p.MatchId
|
||||
return nil
|
||||
})
|
||||
|
||||
{
|
||||
stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.Nil(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.NotNil(t, resp)
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
{
|
||||
stream, err := om.query.QueryBackfills(ctx, &pb.QueryBackfillsRequest{
|
||||
Pool: &pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{
|
||||
{
|
||||
StringArg: "field",
|
||||
Value: "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Len(t, resp.Backfills, 1)
|
||||
|
||||
b.Id = resp.Backfills[0].Id
|
||||
b.Generation = 1
|
||||
b.CreateTime = resp.Backfills[0].CreateTime
|
||||
require.True(t, proto.Equal(b, resp.Backfills[0]))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
{
|
||||
stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{{StringArg: "field", Value: "value"}},
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = stream.Recv()
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposedBackfillUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
om := newOM(t)
|
||||
t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{
|
||||
Ticket: &pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
b, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
|
||||
b.SearchFields = &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field1": "value1",
|
||||
},
|
||||
}
|
||||
//using DefaultEvaluationCriteria just for testing purposes only
|
||||
b.Extensions = map[string]*any.Any{
|
||||
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
|
||||
Score: 10,
|
||||
}),
|
||||
}
|
||||
m := &pb.Match{
|
||||
MatchId: "1",
|
||||
Tickets: []*pb.Ticket{t1},
|
||||
Backfill: b,
|
||||
}
|
||||
|
||||
om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
|
||||
out <- m
|
||||
return nil
|
||||
})
|
||||
|
||||
om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
p, ok := <-in
|
||||
require.True(t, ok)
|
||||
|
||||
out <- p.MatchId
|
||||
return nil
|
||||
})
|
||||
|
||||
{
|
||||
stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.Nil(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.Nil(t, err)
|
||||
require.True(t, proto.Equal(m, resp.Match))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
{
|
||||
stream, err := om.query.QueryBackfills(ctx, &pb.QueryBackfillsRequest{
|
||||
Pool: &pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{
|
||||
{
|
||||
StringArg: "field1",
|
||||
Value: "value1",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Len(t, resp.Backfills, 1)
|
||||
|
||||
// Backfill Generation should be autoincremented
|
||||
b.Generation++
|
||||
require.True(t, proto.Equal(b, resp.Backfills[0]))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
{
|
||||
stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{{StringArg: "field", Value: "value"}},
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = stream.Recv()
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackfillGenerationMismatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
om := newOM(t)
|
||||
t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
|
||||
b, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
b.Generation = 0
|
||||
m := &pb.Match{
|
||||
MatchId: "1",
|
||||
Tickets: []*pb.Ticket{t1},
|
||||
Backfill: b,
|
||||
}
|
||||
|
||||
om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
|
||||
out <- m
|
||||
return nil
|
||||
})
|
||||
|
||||
om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
p, ok := <-in
|
||||
require.True(t, ok)
|
||||
|
||||
out <- p.MatchId
|
||||
return nil
|
||||
})
|
||||
|
||||
{
|
||||
stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
{
|
||||
stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{{StringArg: "field", Value: "value"}},
|
||||
}})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Len(t, resp.Tickets, 1)
|
||||
require.Equal(t, t1.Id, resp.Tickets[0].Id)
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanUpExpiredBackfills(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
om := newOM(t)
|
||||
|
||||
t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, t1)
|
||||
|
||||
b1, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
},
|
||||
}}})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b1)
|
||||
|
||||
om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
// wait until backfill is expired, then try to get it
|
||||
time.Sleep(pendingReleaseTimeout * 2)
|
||||
|
||||
// statestore.CleanupBackfills is called at the end of each synchronizer cycle after fetch matches call, so expired backfill will be removed
|
||||
stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
resp, err := stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
|
||||
// call FetchMatches twice in order to give backfills time to be completely cleaned up
|
||||
stream, err = om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
|
||||
_, err = om.Frontend().GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: b1.Id})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, fmt.Sprintf("rpc error: code = NotFound desc = Backfill id: %s not found", b1.Id), err.Error())
|
||||
}
|
||||
|
||||
func TestBackfillSkipNotfoundError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
om := newOM(t)
|
||||
|
||||
t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, t1)
|
||||
|
||||
b1, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"search": "me",
|
||||
},
|
||||
}}})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b1)
|
||||
|
||||
m := &pb.Match{
|
||||
MatchId: "1",
|
||||
Tickets: []*pb.Ticket{t1},
|
||||
Backfill: b1,
|
||||
}
|
||||
|
||||
om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
|
||||
out <- m
|
||||
return nil
|
||||
})
|
||||
|
||||
om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
p, ok := <-in
|
||||
require.True(t, ok)
|
||||
|
||||
out <- p.MatchId
|
||||
return nil
|
||||
})
|
||||
|
||||
// Delete Backfill to cause NotFound error
|
||||
_, err = om.Frontend().DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: b1.Id})
|
||||
require.NoError(t, err)
|
||||
|
||||
stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
resp, err := stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, io.EOF.Error(), err.Error())
|
||||
|
||||
_, err = om.Frontend().GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: b1.Id})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, fmt.Sprintf("rpc error: code = NotFound desc = Backfill id: %s not found", b1.Id), err.Error())
|
||||
}
|
||||
|
||||
func mustAny(m proto.Message) *any.Any {
|
||||
result, err := ptypes.MarshalAny(m)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return result
|
||||
}
|
@ -1,3 +1,4 @@
|
||||
//go:build e2ecluster
|
||||
// +build e2ecluster
|
||||
|
||||
// Copyright 2019 Google LLC
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build e2ecluster
|
||||
// +build e2ecluster
|
||||
|
||||
// Copyright 2019 Google LLC
|
||||
@ -20,6 +21,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
@ -33,7 +35,6 @@ import (
|
||||
"open-match.dev/open-match/internal/config"
|
||||
mmfService "open-match.dev/open-match/internal/testing/mmf"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func TestServiceHealth(t *testing.T) {
|
||||
|
@ -162,17 +162,18 @@ func (om *om) MMFConfigHTTP() *pb.FunctionConfig {
|
||||
// that parsing bugs can't hide logic bugs.
|
||||
const registrationInterval = time.Millisecond * 200
|
||||
const proposalCollectionInterval = time.Millisecond * 200
|
||||
const pendingReleaseTimeout = time.Millisecond * 200
|
||||
const pendingReleaseTimeout = time.Second * 1
|
||||
const assignedDeleteTimeout = time.Millisecond * 200
|
||||
|
||||
// configFile is the "cononical" test config. It exactly matches the configmap
|
||||
// configFile is the "canonical" test config. It exactly matches the configmap
|
||||
// which is used in the real cluster tests.
|
||||
const configFile = `
|
||||
registrationInterval: 200ms
|
||||
proposalCollectionInterval: 200ms
|
||||
pendingReleaseTimeout: 200ms
|
||||
pendingReleaseTimeout: 1s
|
||||
assignedDeleteTimeout: 200ms
|
||||
queryPageSize: 10
|
||||
backfillLockTimeout: 1m
|
||||
|
||||
logging:
|
||||
level: debug
|
||||
@ -221,7 +222,7 @@ redis:
|
||||
sentinelPort: 26379
|
||||
sentinelMaster: om-redis-master
|
||||
sentinelHostname: open-match-redis
|
||||
sentinelUsePassword:
|
||||
sentinelUsePassword: false
|
||||
usePassword: false
|
||||
passwordPath: /opt/bitnami/redis/secrets/redis-password
|
||||
pool:
|
||||
|
@ -72,7 +72,8 @@ func TestHappyPath(t *testing.T) {
|
||||
require.True(t, proto.Equal(m, resp.Match))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
@ -158,7 +159,8 @@ func TestMatchFunctionMatchCollision(t *testing.T) {
|
||||
require.True(t, time.Since(startTime) < registrationInterval, "%s", time.Since(startTime))
|
||||
|
||||
resp, err = sSuccess.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
@ -330,7 +332,8 @@ func TestMatchWithNoTickets(t *testing.T) {
|
||||
require.True(t, proto.Equal(m, resp.Match))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
@ -411,11 +414,12 @@ func TestNoMatches(t *testing.T) {
|
||||
require.Nil(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
// TestNoMatches covers missing the profile field on fetch matches.
|
||||
// TestNoProfile covers missing the profile field on fetch matches.
|
||||
func TestNoProfile(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
om := newOM(t)
|
||||
@ -552,7 +556,8 @@ func TestStreaming(t *testing.T) {
|
||||
require.True(t, proto.Equal(m2, resp.Match))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
@ -584,7 +589,8 @@ func TestRegistrationWindow(t *testing.T) {
|
||||
require.Nil(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
require.True(t, time.Since(startTime) > registrationInterval, "%s", time.Since(startTime))
|
||||
}
|
||||
@ -691,7 +697,8 @@ func TestMultipleFetchCalls(t *testing.T) {
|
||||
require.True(t, proto.Equal(m1, resp.Match))
|
||||
|
||||
resp, err = s1.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
|
||||
resp, err = s2.Recv()
|
||||
@ -699,7 +706,8 @@ func TestMultipleFetchCalls(t *testing.T) {
|
||||
require.True(t, proto.Equal(m2, resp.Match))
|
||||
|
||||
resp, err = s2.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
@ -784,7 +792,8 @@ func TestSlowBackendDoesntBlock(t *testing.T) {
|
||||
require.True(t, proto.Equal(m2, resp.Match))
|
||||
|
||||
resp, err = s2.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
|
||||
resp, err = s1.Recv()
|
||||
@ -792,7 +801,8 @@ func TestSlowBackendDoesntBlock(t *testing.T) {
|
||||
require.True(t, proto.Equal(m1, resp.Match))
|
||||
|
||||
resp, err = s1.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
@ -838,6 +848,7 @@ func TestHTTPMMF(t *testing.T) {
|
||||
require.True(t, proto.Equal(m, resp.Match))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//go:build !e2ecluster
|
||||
// +build !e2ecluster
|
||||
|
||||
// Copyright 2019 Google LLC
|
||||
|
219
internal/testing/e2e/query_backfills_test.go
Normal file
219
internal/testing/e2e/query_backfills_test.go
Normal file
@ -0,0 +1,219 @@
|
||||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
const (
|
||||
firstBackfillGeneration = 1
|
||||
)
|
||||
|
||||
func TestQueryBackfillsWithEmptyPool(t *testing.T) {
|
||||
om := newOM(t)
|
||||
stream, err := om.Query().QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{Pool: nil})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.Equal(t, codes.InvalidArgument, status.Convert(err).Code())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
func TestNoBackfills(t *testing.T) {
|
||||
om := newOM(t)
|
||||
stream, err := om.Query().QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{Pool: &pb.Pool{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.Equal(t, io.EOF, err)
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
func TestQueryBackfillsPaging(t *testing.T) {
|
||||
om := newOM(t)
|
||||
|
||||
pageSize := 10 // TODO: read from config
|
||||
if pageSize < 1 {
|
||||
require.Fail(t, "invalid page size")
|
||||
}
|
||||
|
||||
total := pageSize*5 + 1
|
||||
expectedIds := map[string]struct{}{}
|
||||
|
||||
for i := 0; i < total; i++ {
|
||||
resp, err := om.Frontend().CreateBackfill(context.Background(), &pb.CreateBackfillRequest{Backfill: &pb.Backfill{}})
|
||||
require.NotNil(t, resp)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedIds[resp.Id] = struct{}{}
|
||||
}
|
||||
|
||||
stream, err := om.Query().QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{Pool: &pb.Pool{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
foundIds := map[string]struct{}{}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
var resp *pb.QueryBackfillsResponse
|
||||
resp, err = stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, pageSize, len(resp.Backfills))
|
||||
|
||||
for _, backfill := range resp.Backfills {
|
||||
foundIds[backfill.Id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(resp.Backfills), 1)
|
||||
foundIds[resp.Backfills[0].Id] = struct{}{}
|
||||
|
||||
require.Equal(t, expectedIds, foundIds)
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
func TestBackfillQueryAfterMMFUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
om := newOM(t)
|
||||
backfill := &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
}
|
||||
pool := &pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{{StringArg: "field", Value: "value"}},
|
||||
}
|
||||
match := &pb.Match{
|
||||
MatchId: "1",
|
||||
Tickets: []*pb.Ticket{},
|
||||
}
|
||||
{
|
||||
resp, err := om.Frontend().CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: backfill})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
match.Backfill = resp
|
||||
}
|
||||
|
||||
om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
|
||||
out <- match
|
||||
return nil
|
||||
})
|
||||
|
||||
om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
p, ok := <-in
|
||||
require.True(t, ok)
|
||||
|
||||
out <- p.MatchId
|
||||
return nil
|
||||
})
|
||||
{
|
||||
stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
|
||||
Config: om.MMFConfigGRPC(),
|
||||
Profile: &pb.MatchProfile{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.True(t, proto.Equal(match, resp.Match))
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Nil(t, resp)
|
||||
require.Equal(t, io.EOF, err)
|
||||
}
|
||||
{
|
||||
stream, err := om.Query().QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{Pool: pool})
|
||||
require.NoError(t, err)
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Len(t, resp.Backfills, 1)
|
||||
// FetchMatches results in a one Backfill update, so Generation is autoincremented
|
||||
require.Equal(t, int64(firstBackfillGeneration)+1, resp.Backfills[0].Generation)
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, io.EOF, err)
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackfillQueryAfterGSUpdate(t *testing.T) {
|
||||
om := newOM(t)
|
||||
backfill := &pb.Backfill{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
"field": "value",
|
||||
},
|
||||
},
|
||||
}
|
||||
pool := &pb.Pool{
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{{StringArg: "field", Value: "value"}},
|
||||
}
|
||||
{
|
||||
resp, err := om.Frontend().CreateBackfill(context.Background(), &pb.CreateBackfillRequest{Backfill: backfill})
|
||||
require.NotNil(t, resp)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(firstBackfillGeneration), resp.Generation)
|
||||
}
|
||||
{
|
||||
stream, err := om.Query().QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{Pool: pool})
|
||||
require.NoError(t, err)
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Len(t, resp.Backfills, 1)
|
||||
require.Equal(t, int64(firstBackfillGeneration), resp.Backfills[0].Generation)
|
||||
backfill = resp.Backfills[0]
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, io.EOF, err)
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
{
|
||||
resp, err := om.Frontend().UpdateBackfill(context.Background(), &pb.UpdateBackfillRequest{Backfill: backfill})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Equal(t, int64(firstBackfillGeneration+1), resp.Generation)
|
||||
}
|
||||
{
|
||||
stream, err := om.Query().QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{Pool: pool})
|
||||
require.NoError(t, err)
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.Len(t, resp.Backfills, 1)
|
||||
require.Equal(t, int64(firstBackfillGeneration+1), resp.Backfills[0].Generation)
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, io.EOF, err)
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
}
|
@ -113,7 +113,8 @@ func TestPaging(t *testing.T) {
|
||||
require.Equal(t, expectedIds, foundIds)
|
||||
|
||||
resp, err = stream.Recv()
|
||||
require.Equal(t, err, io.EOF)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err.Error(), io.EOF.Error())
|
||||
require.Nil(t, resp)
|
||||
}
|
||||
|
||||
@ -153,7 +154,11 @@ func returnedByQuery(t *testing.T, tc testcases.TestCase) (found bool) {
|
||||
om := newOM(t)
|
||||
|
||||
{
|
||||
resp, err := om.Frontend().CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
|
||||
ticket := pb.Ticket{
|
||||
SearchFields: tc.SearchFields,
|
||||
}
|
||||
resp, err := om.Frontend().CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: &ticket})
|
||||
|
||||
require.NotNil(t, resp)
|
||||
require.Nil(t, err)
|
||||
}
|
||||
@ -183,7 +188,11 @@ func returnedByQueryID(t *testing.T, tc testcases.TestCase) (found bool) {
|
||||
om := newOM(t)
|
||||
|
||||
{
|
||||
resp, err := om.Frontend().CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
|
||||
ticket := pb.Ticket{
|
||||
SearchFields: tc.SearchFields,
|
||||
}
|
||||
resp, err := om.Frontend().CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: &ticket})
|
||||
|
||||
require.NotNil(t, resp)
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
@ -16,10 +16,13 @@ package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
@ -55,7 +58,8 @@ func TestAssignTickets(t *testing.T) {
|
||||
|
||||
resp, err := om.Backend().AssignTickets(ctx, req)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &pb.AssignTicketsResponse{}, resp)
|
||||
expected := &pb.AssignTicketsResponse{}
|
||||
require.True(t, proto.Equal(expected, resp), fmt.Sprintf("Protobuf messages are not equal\nexpected: %v\nactual: %v", expected, resp))
|
||||
|
||||
get, err := om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: t1.Id})
|
||||
require.Nil(t, err)
|
||||
@ -70,6 +74,27 @@ func TestAssignTickets(t *testing.T) {
|
||||
require.Equal(t, "b", get.Assignment.Connection)
|
||||
}
|
||||
|
||||
// TestAssignTicketsEmpty covers calls to assign when empty TicketIds
|
||||
func TestAssignTicketsEmpty(t *testing.T) {
|
||||
om := newOM(t)
|
||||
ctx := context.Background()
|
||||
|
||||
req := &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{},
|
||||
Assignment: &pb.Assignment{Connection: "a"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := om.Backend().AssignTickets(ctx, req)
|
||||
require.Nil(t, resp)
|
||||
require.Equal(t, codes.InvalidArgument.String(), status.Convert(err).Code().String())
|
||||
require.Equal(t, "AssignmentGroupTicketIds is empty", status.Convert(err).Message())
|
||||
|
||||
}
|
||||
|
||||
// TestAssignTicketsInvalidArgument covers various invalid calls to assign
|
||||
// tickets.
|
||||
func TestAssignTicketsInvalidArgument(t *testing.T) {
|
||||
@ -161,14 +186,15 @@ func TestAssignTicketsMissingTicket(t *testing.T) {
|
||||
|
||||
resp, err := om.Backend().AssignTickets(ctx, req)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &pb.AssignTicketsResponse{
|
||||
expected := &pb.AssignTicketsResponse{
|
||||
Failures: []*pb.AssignmentFailure{
|
||||
{
|
||||
TicketId: t2.Id,
|
||||
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
|
||||
},
|
||||
},
|
||||
}, resp)
|
||||
}
|
||||
require.True(t, proto.Equal(expected, resp), fmt.Sprintf("Protobuf messages are not equal\nexpected: %v\nactual: %v", expected, resp))
|
||||
}
|
||||
|
||||
func TestTicketDelete(t *testing.T) {
|
||||
@ -198,7 +224,8 @@ func TestEmptyReleaseTicketsRequest(t *testing.T) {
|
||||
})
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &pb.ReleaseTicketsResponse{}, resp)
|
||||
expected := &pb.ReleaseTicketsResponse{}
|
||||
require.True(t, proto.Equal(expected, resp), fmt.Sprintf("Protobuf messages are not equal\nexpected: %v\nactual: %v", expected, resp))
|
||||
}
|
||||
|
||||
// TestReleaseTickets covers that tickets returned from matches are no longer
|
||||
@ -285,7 +312,8 @@ func TestReleaseTickets(t *testing.T) {
|
||||
})
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &pb.ReleaseTicketsResponse{}, resp)
|
||||
expected := &pb.ReleaseTicketsResponse{}
|
||||
require.True(t, proto.Equal(expected, resp), fmt.Sprintf("Protobuf messages are not equal\nexpected: %v\nactual: %v", expected, resp))
|
||||
}
|
||||
|
||||
{ // Ticket present in query
|
||||
@ -367,7 +395,8 @@ func TestReleaseAllTickets(t *testing.T) {
|
||||
resp, err := om.Backend().ReleaseAllTickets(ctx, &pb.ReleaseAllTicketsRequest{})
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &pb.ReleaseAllTicketsResponse{}, resp)
|
||||
expected := &pb.ReleaseAllTicketsResponse{}
|
||||
require.True(t, proto.Equal(expected, resp), fmt.Sprintf("Protobuf messages are not equal\nexpected: %v\nactual: %v", expected, resp))
|
||||
}
|
||||
|
||||
{ // Ticket present in query
|
||||
@ -560,7 +589,8 @@ func TestAssignedTicketsNotReturnedByQuery(t *testing.T) {
|
||||
|
||||
resp, err := om.Backend().AssignTickets(ctx, req)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &pb.AssignTicketsResponse{}, resp)
|
||||
expected := &pb.AssignTicketsResponse{}
|
||||
require.True(t, proto.Equal(expected, resp), fmt.Sprintf("Protobuf messages are not equal\nexpected: %v\nactual: %v", expected, resp))
|
||||
|
||||
require.False(t, returned())
|
||||
}
|
||||
@ -585,7 +615,8 @@ func TestAssignedTicketDeleteTimeout(t *testing.T) {
|
||||
|
||||
resp, err := om.Backend().AssignTickets(ctx, req)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, &pb.AssignTicketsResponse{}, resp)
|
||||
expected := &pb.AssignTicketsResponse{}
|
||||
require.True(t, proto.Equal(expected, resp), fmt.Sprintf("Protobuf messages are not equal\nexpected: %v\nactual: %v", expected, resp))
|
||||
|
||||
get, err := om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: t1.Id})
|
||||
require.Nil(t, err)
|
||||
@ -598,3 +629,41 @@ func TestAssignedTicketDeleteTimeout(t *testing.T) {
|
||||
require.Equal(t, codes.NotFound, status.Convert(err).Code())
|
||||
|
||||
}
|
||||
|
||||
func TestWatchAssignments(t *testing.T) {
|
||||
om := newOM(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
|
||||
require.NoError(t, err)
|
||||
|
||||
{
|
||||
req := &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{t1.Id},
|
||||
Assignment: &pb.Assignment{Connection: "a"},
|
||||
},
|
||||
},
|
||||
}
|
||||
resp, err := om.Backend().AssignTickets(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, resp.Failures, 0)
|
||||
}
|
||||
|
||||
{
|
||||
stream, err := om.Frontend().WatchAssignments(ctx, &pb.WatchAssignmentsRequest{TicketId: t1.Id})
|
||||
require.NoError(t, err)
|
||||
|
||||
var a *pb.Assignment
|
||||
for a.GetConnection() == "" {
|
||||
resp, err := stream.Recv()
|
||||
require.NoError(t, err)
|
||||
|
||||
a = resp.Assignment
|
||||
}
|
||||
|
||||
require.Equal(t, "a", a.Connection)
|
||||
}
|
||||
}
|
||||
|
@ -54,3 +54,29 @@ func (s *FakeFrontend) GetTicket(ctx context.Context, req *pb.GetTicketRequest)
|
||||
func (s *FakeFrontend) WatchAssignments(req *pb.WatchAssignmentsRequest, stream pb.FrontendService_WatchAssignmentsServer) error {
|
||||
return status.Error(codes.Unimplemented, "not implemented")
|
||||
}
|
||||
|
||||
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info.
|
||||
// This triggers an assignment process.
|
||||
func (s *FakeFrontend) AcknowledgeBackfill(ctx context.Context, req *pb.AcknowledgeBackfillRequest) (*pb.AcknowledgeBackfillResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "not implemented")
|
||||
}
|
||||
|
||||
// CreateBackfill creates a new Backfill object.
|
||||
func (s *FakeFrontend) CreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest) (*pb.Backfill, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "not implemented")
|
||||
}
|
||||
|
||||
// DeleteBackfill deletes a Backfill by its ID.
|
||||
func (s *FakeFrontend) DeleteBackfill(ctx context.Context, req *pb.DeleteBackfillRequest) (*empty.Empty, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "not implemented")
|
||||
}
|
||||
|
||||
// GetBackfill fetches a Backfill object by its ID.
|
||||
func (s *FakeFrontend) GetBackfill(ctx context.Context, req *pb.GetBackfillRequest) (*pb.Backfill, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "not implemented")
|
||||
}
|
||||
|
||||
// UpdateBackfill updates a Backfill object, if present.
|
||||
func (s *FakeFrontend) UpdateBackfill(ctx context.Context, req *pb.UpdateBackfillRequest) (*pb.Backfill, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "not implemented")
|
||||
}
|
||||
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package matchfunction provides helper methods to simplify authoring a match fuction.
|
||||
// Package matchfunction provides helper methods to simplify authoring a match function.
|
||||
package matchfunction
|
||||
|
||||
import (
|
||||
@ -46,7 +46,7 @@ func QueryPool(ctx context.Context, queryClient pb.QueryServiceClient, pool *pb.
|
||||
}
|
||||
}
|
||||
|
||||
// QueryPools queries queryService and returns the a map of pool names to the tickets belonging to those pools.
|
||||
// QueryPools queries queryService and returns a map of pool names to the tickets belonging to those pools.
|
||||
func QueryPools(ctx context.Context, queryClient pb.QueryServiceClient, pools []*pb.Pool, opts ...grpc.CallOption) (map[string][]*pb.Ticket, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
@ -86,3 +86,66 @@ func QueryPools(ctx context.Context, queryClient pb.QueryServiceClient, pools []
|
||||
|
||||
return poolMap, nil
|
||||
}
|
||||
|
||||
// QueryBackfillPool queries queryService and returns the backfills that belong to the specified pool.
|
||||
func QueryBackfillPool(ctx context.Context, queryClient pb.QueryServiceClient, pool *pb.Pool, opts ...grpc.CallOption) ([]*pb.Backfill, error) {
|
||||
query, err := queryClient.QueryBackfills(ctx, &pb.QueryBackfillsRequest{Pool: pool}, opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error calling queryService.QueryBackfills: %w", err)
|
||||
}
|
||||
|
||||
var backfills []*pb.Backfill
|
||||
for {
|
||||
resp, err := query.Recv()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return backfills, nil
|
||||
}
|
||||
return nil, fmt.Errorf("error receiving backfills from queryService.QueryBackfills: %w", err)
|
||||
}
|
||||
|
||||
backfills = append(backfills, resp.Backfills...)
|
||||
}
|
||||
}
|
||||
|
||||
// QueryBackfillPools queries queryService and returns a map of pool names to the backfills belonging to those pools.
|
||||
func QueryBackfillPools(ctx context.Context, queryClient pb.QueryServiceClient, pools []*pb.Pool, opts ...grpc.CallOption) (map[string][]*pb.Backfill, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
type result struct {
|
||||
err error
|
||||
backfills []*pb.Backfill
|
||||
name string
|
||||
}
|
||||
|
||||
results := make(chan result)
|
||||
for _, pool := range pools {
|
||||
go func(pool *pb.Pool) {
|
||||
r := result{
|
||||
name: pool.Name,
|
||||
}
|
||||
r.backfills, r.err = QueryBackfillPool(ctx, queryClient, pool, opts...)
|
||||
|
||||
select {
|
||||
case results <- r:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}(pool)
|
||||
}
|
||||
|
||||
poolMap := make(map[string][]*pb.Backfill)
|
||||
for i := 0; i < len(pools); i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, fmt.Errorf("context canceled while querying pools: %w", ctx.Err())
|
||||
case r := <-results:
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
|
||||
poolMap[r.name] = r.backfills
|
||||
}
|
||||
}
|
||||
|
||||
return poolMap, nil
|
||||
}
|
||||
|
1293
pkg/pb/backend.pb.go
1293
pkg/pb/backend.pb.go
File diff suppressed because it is too large
Load Diff
@ -13,14 +13,14 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
@ -29,7 +29,7 @@ var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = descriptor.ForMessage
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_BackendService_FetchMatches_0(ctx context.Context, marshaler runtime.Marshaler, client BackendServiceClient, req *http.Request, pathParams map[string]string) (BackendService_FetchMatchesClient, runtime.ServerMetadata, error) {
|
||||
var protoReq FetchMatchesRequest
|
||||
@ -161,6 +161,7 @@ func local_request_BackendService_ReleaseAllTickets_0(ctx context.Context, marsh
|
||||
// RegisterBackendServiceHandlerServer registers the http handlers for service BackendService to "mux".
|
||||
// UnaryRPC :call BackendServiceServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterBackendServiceHandlerFromEndpoint instead.
|
||||
func RegisterBackendServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server BackendServiceServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_BackendService_FetchMatches_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
@ -173,13 +174,16 @@ func RegisterBackendServiceHandlerServer(ctx context.Context, mux *runtime.Serve
|
||||
mux.Handle("POST", pattern_BackendService_AssignTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.BackendService/AssignTickets")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_BackendService_AssignTickets_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
@ -193,13 +197,16 @@ func RegisterBackendServiceHandlerServer(ctx context.Context, mux *runtime.Serve
|
||||
mux.Handle("POST", pattern_BackendService_ReleaseTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.BackendService/ReleaseTickets")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_BackendService_ReleaseTickets_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
@ -213,13 +220,16 @@ func RegisterBackendServiceHandlerServer(ctx context.Context, mux *runtime.Serve
|
||||
mux.Handle("POST", pattern_BackendService_ReleaseAllTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.BackendService/ReleaseAllTickets")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_BackendService_ReleaseAllTickets_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
@ -275,7 +285,7 @@ func RegisterBackendServiceHandlerClient(ctx context.Context, mux *runtime.Serve
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.BackendService/FetchMatches")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -295,7 +305,7 @@ func RegisterBackendServiceHandlerClient(ctx context.Context, mux *runtime.Serve
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.BackendService/AssignTickets")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -315,7 +325,7 @@ func RegisterBackendServiceHandlerClient(ctx context.Context, mux *runtime.Serve
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.BackendService/ReleaseTickets")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -335,7 +345,7 @@ func RegisterBackendServiceHandlerClient(ctx context.Context, mux *runtime.Serve
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.BackendService/ReleaseAllTickets")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -355,13 +365,13 @@ func RegisterBackendServiceHandlerClient(ctx context.Context, mux *runtime.Serve
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_BackendService_FetchMatches_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "matches"}, "fetch", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_BackendService_FetchMatches_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "matches"}, "fetch"))
|
||||
|
||||
pattern_BackendService_AssignTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "tickets"}, "assign", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_BackendService_AssignTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "tickets"}, "assign"))
|
||||
|
||||
pattern_BackendService_ReleaseTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "tickets"}, "release", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_BackendService_ReleaseTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "tickets"}, "release"))
|
||||
|
||||
pattern_BackendService_ReleaseAllTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "tickets"}, "releaseall", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_BackendService_ReleaseAllTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backendservice", "tickets"}, "releaseall"))
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -1,160 +1,283 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0-devel
|
||||
// protoc v3.10.1
|
||||
// source: api/evaluator.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type EvaluateRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// A Matches proposed by the Match Function representing a candidate of the final results.
|
||||
Match *Match `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Match *Match `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"`
|
||||
}
|
||||
|
||||
func (m *EvaluateRequest) Reset() { *m = EvaluateRequest{} }
|
||||
func (m *EvaluateRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*EvaluateRequest) ProtoMessage() {}
|
||||
func (x *EvaluateRequest) Reset() {
|
||||
*x = EvaluateRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_evaluator_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EvaluateRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvaluateRequest) ProtoMessage() {}
|
||||
|
||||
func (x *EvaluateRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_evaluator_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvaluateRequest.ProtoReflect.Descriptor instead.
|
||||
func (*EvaluateRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_8c58cb7dff9acb0f, []int{0}
|
||||
return file_api_evaluator_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *EvaluateRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_EvaluateRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *EvaluateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_EvaluateRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *EvaluateRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_EvaluateRequest.Merge(m, src)
|
||||
}
|
||||
func (m *EvaluateRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_EvaluateRequest.Size(m)
|
||||
}
|
||||
func (m *EvaluateRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_EvaluateRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_EvaluateRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *EvaluateRequest) GetMatch() *Match {
|
||||
if m != nil {
|
||||
return m.Match
|
||||
func (x *EvaluateRequest) GetMatch() *Match {
|
||||
if x != nil {
|
||||
return x.Match
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EvaluateResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// A Match ID representing a shortlisted match returned by the evaluator as the final result.
|
||||
MatchId string `protobuf:"bytes,2,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
MatchId string `protobuf:"bytes,2,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *EvaluateResponse) Reset() { *m = EvaluateResponse{} }
|
||||
func (m *EvaluateResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*EvaluateResponse) ProtoMessage() {}
|
||||
func (x *EvaluateResponse) Reset() {
|
||||
*x = EvaluateResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_evaluator_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EvaluateResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvaluateResponse) ProtoMessage() {}
|
||||
|
||||
func (x *EvaluateResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_evaluator_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvaluateResponse.ProtoReflect.Descriptor instead.
|
||||
func (*EvaluateResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_8c58cb7dff9acb0f, []int{1}
|
||||
return file_api_evaluator_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (m *EvaluateResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_EvaluateResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *EvaluateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_EvaluateResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *EvaluateResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_EvaluateResponse.Merge(m, src)
|
||||
}
|
||||
func (m *EvaluateResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_EvaluateResponse.Size(m)
|
||||
}
|
||||
func (m *EvaluateResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_EvaluateResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_EvaluateResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *EvaluateResponse) GetMatchId() string {
|
||||
if m != nil {
|
||||
return m.MatchId
|
||||
func (x *EvaluateResponse) GetMatchId() string {
|
||||
if x != nil {
|
||||
return x.MatchId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*EvaluateRequest)(nil), "openmatch.EvaluateRequest")
|
||||
proto.RegisterType((*EvaluateResponse)(nil), "openmatch.EvaluateResponse")
|
||||
var File_api_evaluator_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_api_evaluator_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68,
|
||||
0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x22, 0x39, 0x0a, 0x0f, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68,
|
||||
0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x33, 0x0a,
|
||||
0x10, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x4a, 0x04, 0x08, 0x01,
|
||||
0x10, 0x02, 0x32, 0x7f, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x12,
|
||||
0x72, 0x0a, 0x08, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x6f, 0x70,
|
||||
0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61,
|
||||
0x74, 0x63, 0x68, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x6d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x65, 0x73, 0x3a, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x28,
|
||||
0x01, 0x30, 0x01, 0x42, 0x8c, 0x03, 0x5a, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0xaa, 0x02, 0x09, 0x4f, 0x70, 0x65, 0x6e, 0x4d,
|
||||
0x61, 0x74, 0x63, 0x68, 0x92, 0x41, 0xda, 0x02, 0x12, 0xb3, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x49, 0x0a, 0x0a, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x4d,
|
||||
0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6f, 0x70,
|
||||
0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x1a, 0x23, 0x6f, 0x70,
|
||||
0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2d, 0x64, 0x69, 0x73, 0x63, 0x75, 0x73, 0x73,
|
||||
0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x63, 0x6f,
|
||||
0x6d, 0x2a, 0x56, 0x0a, 0x12, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x20, 0x32, 0x2e, 0x30, 0x20,
|
||||
0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
|
||||
0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x66, 0x6f, 0x72, 0x67, 0x61, 0x6d, 0x65, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d,
|
||||
0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65,
|
||||
0x72, 0x2f, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x32, 0x03, 0x31, 0x2e, 0x30, 0x2a, 0x02,
|
||||
0x01, 0x02, 0x32, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f,
|
||||
0x6a, 0x73, 0x6f, 0x6e, 0x3a, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x52, 0x3b, 0x0a, 0x03, 0x34, 0x30, 0x34, 0x12, 0x34, 0x0a,
|
||||
0x2a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x74,
|
||||
0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x64, 0x6f, 0x65, 0x73,
|
||||
0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x2e, 0x12, 0x06, 0x0a, 0x04, 0x9a,
|
||||
0x02, 0x01, 0x07, 0x72, 0x3d, 0x0a, 0x18, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x4d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x20, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
|
||||
0x21, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61,
|
||||
0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x73, 0x69, 0x74, 0x65, 0x2f, 0x64, 0x6f, 0x63,
|
||||
0x73, 0x2f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("api/evaluator.proto", fileDescriptor_8c58cb7dff9acb0f) }
|
||||
var (
|
||||
file_api_evaluator_proto_rawDescOnce sync.Once
|
||||
file_api_evaluator_proto_rawDescData = file_api_evaluator_proto_rawDesc
|
||||
)
|
||||
|
||||
var fileDescriptor_8c58cb7dff9acb0f = []byte{
|
||||
// 492 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x31, 0x8f, 0xd3, 0x30,
|
||||
0x14, 0xc7, 0x95, 0xf4, 0xb8, 0x6b, 0xcd, 0x40, 0x65, 0x24, 0x54, 0x0a, 0x42, 0xa6, 0x27, 0xa1,
|
||||
0x52, 0xd1, 0xb8, 0xd7, 0xeb, 0x42, 0x11, 0xd2, 0x1d, 0xd0, 0xa1, 0xe8, 0x00, 0xa9, 0x48, 0x0c,
|
||||
0x2c, 0xc8, 0x49, 0x1e, 0x49, 0xa0, 0xf1, 0x33, 0x7e, 0x4e, 0x8f, 0x0d, 0x89, 0x99, 0x09, 0x36,
|
||||
0x3e, 0x02, 0x2b, 0x1f, 0x85, 0x8d, 0x99, 0x0f, 0x82, 0x92, 0x50, 0x5a, 0xdd, 0xdd, 0x92, 0xc8,
|
||||
0xfe, 0x3d, 0xff, 0xff, 0xef, 0xfd, 0x6d, 0x76, 0x55, 0x99, 0x4c, 0xc2, 0x4a, 0x2d, 0x0b, 0xe5,
|
||||
0xd0, 0x06, 0xc6, 0xa2, 0x43, 0xde, 0x42, 0x03, 0x3a, 0x57, 0x2e, 0x4a, 0xbb, 0xbc, 0xe4, 0x39,
|
||||
0x10, 0xa9, 0x04, 0xa8, 0xc6, 0xdd, 0x9b, 0x09, 0x62, 0xb2, 0x04, 0x59, 0x22, 0xa5, 0x35, 0x3a,
|
||||
0xe5, 0x32, 0xd4, 0x6b, 0x7a, 0xaf, 0xfa, 0x45, 0xc3, 0x04, 0xf4, 0x90, 0x4e, 0x55, 0x92, 0x80,
|
||||
0x95, 0x68, 0xaa, 0x8a, 0xf3, 0xd5, 0xbd, 0xfb, 0xec, 0xca, 0xac, 0x76, 0x87, 0x05, 0x7c, 0x28,
|
||||
0x80, 0x1c, 0xbf, 0xc3, 0x2e, 0x55, 0xde, 0x1d, 0x4f, 0x78, 0xfd, 0xcb, 0xe3, 0x76, 0xf0, 0xbf,
|
||||
0x9b, 0xe0, 0x59, 0xf9, 0x5d, 0xd4, 0xb8, 0x77, 0xc8, 0xda, 0x9b, 0xa3, 0x64, 0x50, 0x13, 0xf0,
|
||||
0xeb, 0xac, 0x59, 0xc1, 0x37, 0x59, 0xdc, 0xf1, 0x85, 0xd7, 0x6f, 0x2d, 0xf6, 0xaa, 0xf5, 0x3c,
|
||||
0x7e, 0xba, 0xd3, 0xf4, 0xda, 0xfe, 0xf8, 0x13, 0x6b, 0xcd, 0xd6, 0xd3, 0x72, 0xcb, 0x9a, 0x6b,
|
||||
0x05, 0xde, 0xdd, 0xb2, 0x39, 0xd3, 0x51, 0xf7, 0xc6, 0x85, 0xac, 0xb6, 0xec, 0xdd, 0xfd, 0xfc,
|
||||
0xeb, 0xcf, 0x37, 0x7f, 0xbf, 0x77, 0x4b, 0xae, 0x0e, 0x36, 0x49, 0xca, 0xaa, 0x1a, 0x68, 0xfa,
|
||||
0x6f, 0x07, 0xa6, 0xde, 0xa0, 0xef, 0x8d, 0xbc, 0x47, 0x5f, 0x1a, 0x5f, 0x8f, 0x7f, 0xfb, 0xfc,
|
||||
0xa7, 0xb7, 0xd5, 0x48, 0x6f, 0xce, 0xd8, 0x0b, 0x03, 0x5a, 0x54, 0xf3, 0xf1, 0x6b, 0xa9, 0x73,
|
||||
0x86, 0xa6, 0x52, 0x96, 0xae, 0xc3, 0xda, 0x36, 0x86, 0x55, 0x77, 0x7f, 0xb3, 0x1e, 0xc6, 0x19,
|
||||
0x45, 0x05, 0xd1, 0x51, 0x7d, 0x15, 0x89, 0xc5, 0xc2, 0x50, 0x10, 0x61, 0x3e, 0x78, 0xc5, 0xf8,
|
||||
0xb1, 0x51, 0x51, 0x0a, 0x62, 0x1c, 0x8c, 0xc4, 0x49, 0x16, 0x41, 0x99, 0xcb, 0xd1, 0x5a, 0x32,
|
||||
0xc9, 0x5c, 0x5a, 0x84, 0x65, 0xa5, 0xac, 0x8f, 0xbe, 0x45, 0x9b, 0xa8, 0x1c, 0x68, 0xcb, 0x4c,
|
||||
0x86, 0x4b, 0x0c, 0x65, 0xae, 0xc8, 0x81, 0x95, 0x27, 0xf3, 0xc7, 0xb3, 0xe7, 0x2f, 0x67, 0xe3,
|
||||
0xc6, 0x41, 0x30, 0x1a, 0xf8, 0x9e, 0x3f, 0x6e, 0x2b, 0x63, 0x96, 0x59, 0x54, 0xdd, 0xa2, 0x7c,
|
||||
0x47, 0xa8, 0xa7, 0xe7, 0x76, 0x16, 0x0f, 0x58, 0x63, 0x32, 0x9a, 0xf0, 0x09, 0x1b, 0x2c, 0xc0,
|
||||
0x15, 0x56, 0x43, 0x2c, 0x4e, 0x53, 0xd0, 0xc2, 0xa5, 0x20, 0x2c, 0x10, 0x16, 0x36, 0x02, 0x11,
|
||||
0x23, 0x90, 0xd0, 0xe8, 0x04, 0x7c, 0xcc, 0xc8, 0x05, 0x7c, 0x97, 0xed, 0x7c, 0xf7, 0xbd, 0x3d,
|
||||
0xfb, 0x90, 0x75, 0x36, 0x61, 0x88, 0x27, 0x18, 0x15, 0x39, 0xe8, 0xfa, 0xd5, 0xf0, 0xdb, 0x17,
|
||||
0x47, 0x23, 0x29, 0x73, 0x20, 0x63, 0x8c, 0x48, 0xbe, 0x16, 0x67, 0xd0, 0xd6, 0x5c, 0xe6, 0x7d,
|
||||
0x22, 0x4d, 0xf8, 0xc3, 0x6f, 0x95, 0xfa, 0x95, 0x7c, 0xb8, 0x5b, 0x3d, 0xc3, 0xc3, 0xbf, 0x01,
|
||||
0x00, 0x00, 0xff, 0xff, 0xf4, 0x55, 0x7c, 0x81, 0x08, 0x03, 0x00, 0x00,
|
||||
func file_api_evaluator_proto_rawDescGZIP() []byte {
|
||||
file_api_evaluator_proto_rawDescOnce.Do(func() {
|
||||
file_api_evaluator_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_evaluator_proto_rawDescData)
|
||||
})
|
||||
return file_api_evaluator_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_api_evaluator_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_api_evaluator_proto_goTypes = []interface{}{
|
||||
(*EvaluateRequest)(nil), // 0: openmatch.EvaluateRequest
|
||||
(*EvaluateResponse)(nil), // 1: openmatch.EvaluateResponse
|
||||
(*Match)(nil), // 2: openmatch.Match
|
||||
}
|
||||
var file_api_evaluator_proto_depIdxs = []int32{
|
||||
2, // 0: openmatch.EvaluateRequest.match:type_name -> openmatch.Match
|
||||
0, // 1: openmatch.Evaluator.Evaluate:input_type -> openmatch.EvaluateRequest
|
||||
1, // 2: openmatch.Evaluator.Evaluate:output_type -> openmatch.EvaluateResponse
|
||||
2, // [2:3] is the sub-list for method output_type
|
||||
1, // [1:2] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_api_evaluator_proto_init() }
|
||||
func file_api_evaluator_proto_init() {
|
||||
if File_api_evaluator_proto != nil {
|
||||
return
|
||||
}
|
||||
file_api_messages_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_api_evaluator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EvaluateRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_api_evaluator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EvaluateResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_api_evaluator_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_api_evaluator_proto_goTypes,
|
||||
DependencyIndexes: file_api_evaluator_proto_depIdxs,
|
||||
MessageInfos: file_api_evaluator_proto_msgTypes,
|
||||
}.Build()
|
||||
File_api_evaluator_proto = out.File
|
||||
file_api_evaluator_proto_rawDesc = nil
|
||||
file_api_evaluator_proto_goTypes = nil
|
||||
file_api_evaluator_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// EvaluatorClient is the client API for Evaluator service.
|
||||
//
|
||||
@ -165,10 +288,10 @@ type EvaluatorClient interface {
|
||||
}
|
||||
|
||||
type evaluatorClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewEvaluatorClient(cc *grpc.ClientConn) EvaluatorClient {
|
||||
func NewEvaluatorClient(cc grpc.ClientConnInterface) EvaluatorClient {
|
||||
return &evaluatorClient{cc}
|
||||
}
|
||||
|
||||
@ -213,7 +336,7 @@ type EvaluatorServer interface {
|
||||
type UnimplementedEvaluatorServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedEvaluatorServer) Evaluate(srv Evaluator_EvaluateServer) error {
|
||||
func (*UnimplementedEvaluatorServer) Evaluate(Evaluator_EvaluateServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Evaluate not implemented")
|
||||
}
|
||||
|
||||
|
@ -13,14 +13,14 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
@ -29,7 +29,7 @@ var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = descriptor.ForMessage
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_Evaluator_Evaluate_0(ctx context.Context, marshaler runtime.Marshaler, client EvaluatorClient, req *http.Request, pathParams map[string]string) (Evaluator_EvaluateClient, runtime.ServerMetadata, error) {
|
||||
var metadata runtime.ServerMetadata
|
||||
@ -86,6 +86,7 @@ func request_Evaluator_Evaluate_0(ctx context.Context, marshaler runtime.Marshal
|
||||
// RegisterEvaluatorHandlerServer registers the http handlers for service Evaluator to "mux".
|
||||
// UnaryRPC :call EvaluatorServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterEvaluatorHandlerFromEndpoint instead.
|
||||
func RegisterEvaluatorHandlerServer(ctx context.Context, mux *runtime.ServeMux, server EvaluatorServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_Evaluator_Evaluate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
@ -140,7 +141,7 @@ func RegisterEvaluatorHandlerClient(ctx context.Context, mux *runtime.ServeMux,
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.Evaluator/Evaluate")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -160,7 +161,7 @@ func RegisterEvaluatorHandlerClient(ctx context.Context, mux *runtime.ServeMux,
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_Evaluator_Evaluate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "evaluator", "matches"}, "evaluate", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_Evaluator_Evaluate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "evaluator", "matches"}, "evaluate"))
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -1,81 +1,161 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0-devel
|
||||
// protoc v3.10.1
|
||||
// source: api/extensions.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// A DefaultEvaluationCriteria is used for a match's evaluation_input when using
|
||||
// the default evaluator.
|
||||
type DefaultEvaluationCriteria struct {
|
||||
Score float64 `protobuf:"fixed64,1,opt,name=score,proto3" json:"score,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Score float64 `protobuf:"fixed64,1,opt,name=score,proto3" json:"score,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DefaultEvaluationCriteria) Reset() { *m = DefaultEvaluationCriteria{} }
|
||||
func (m *DefaultEvaluationCriteria) String() string { return proto.CompactTextString(m) }
|
||||
func (*DefaultEvaluationCriteria) ProtoMessage() {}
|
||||
func (x *DefaultEvaluationCriteria) Reset() {
|
||||
*x = DefaultEvaluationCriteria{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_extensions_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DefaultEvaluationCriteria) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DefaultEvaluationCriteria) ProtoMessage() {}
|
||||
|
||||
func (x *DefaultEvaluationCriteria) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_extensions_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DefaultEvaluationCriteria.ProtoReflect.Descriptor instead.
|
||||
func (*DefaultEvaluationCriteria) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_09e3e066475ff045, []int{0}
|
||||
return file_api_extensions_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *DefaultEvaluationCriteria) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_DefaultEvaluationCriteria.Unmarshal(m, b)
|
||||
}
|
||||
func (m *DefaultEvaluationCriteria) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_DefaultEvaluationCriteria.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *DefaultEvaluationCriteria) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DefaultEvaluationCriteria.Merge(m, src)
|
||||
}
|
||||
func (m *DefaultEvaluationCriteria) XXX_Size() int {
|
||||
return xxx_messageInfo_DefaultEvaluationCriteria.Size(m)
|
||||
}
|
||||
func (m *DefaultEvaluationCriteria) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DefaultEvaluationCriteria.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DefaultEvaluationCriteria proto.InternalMessageInfo
|
||||
|
||||
func (m *DefaultEvaluationCriteria) GetScore() float64 {
|
||||
if m != nil {
|
||||
return m.Score
|
||||
func (x *DefaultEvaluationCriteria) GetScore() float64 {
|
||||
if x != nil {
|
||||
return x.Score
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*DefaultEvaluationCriteria)(nil), "openmatch.DefaultEvaluationCriteria")
|
||||
var File_api_extensions_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_api_extensions_proto_rawDesc = []byte{
|
||||
0x0a, 0x14, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x22, 0x31, 0x0a, 0x19, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73,
|
||||
0x63, 0x6f, 0x72, 0x65, 0x42, 0x2e, 0x5a, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0xaa, 0x02, 0x09, 0x4f, 0x70, 0x65, 0x6e, 0x4d,
|
||||
0x61, 0x74, 0x63, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("api/extensions.proto", fileDescriptor_09e3e066475ff045) }
|
||||
var (
|
||||
file_api_extensions_proto_rawDescOnce sync.Once
|
||||
file_api_extensions_proto_rawDescData = file_api_extensions_proto_rawDesc
|
||||
)
|
||||
|
||||
var fileDescriptor_09e3e066475ff045 = []byte{
|
||||
// 142 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x49, 0x2c, 0xc8, 0xd4,
|
||||
0x4f, 0xad, 0x28, 0x49, 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9,
|
||||
0x17, 0xe2, 0xcc, 0x2f, 0x48, 0xcd, 0xcb, 0x4d, 0x2c, 0x49, 0xce, 0x50, 0x32, 0xe4, 0x92, 0x74,
|
||||
0x49, 0x4d, 0x4b, 0x2c, 0xcd, 0x29, 0x71, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf,
|
||||
0x73, 0x2e, 0xca, 0x2c, 0x49, 0x2d, 0xca, 0x4c, 0x14, 0x12, 0xe1, 0x62, 0x2d, 0x4e, 0xce, 0x2f,
|
||||
0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0c, 0x82, 0x70, 0x9c, 0xf4, 0xa2, 0x14, 0x40, 0xfa,
|
||||
0x75, 0xc1, 0x06, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0x23, 0xb8, 0xfa, 0x05, 0xd9, 0xe9, 0xfa, 0x05,
|
||||
0x49, 0xab, 0x98, 0x38, 0xfd, 0x0b, 0x52, 0xf3, 0x7c, 0x41, 0x42, 0x49, 0x6c, 0x60, 0x4b, 0x8d,
|
||||
0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x69, 0xf6, 0x37, 0x8c, 0x00, 0x00, 0x00,
|
||||
func file_api_extensions_proto_rawDescGZIP() []byte {
|
||||
file_api_extensions_proto_rawDescOnce.Do(func() {
|
||||
file_api_extensions_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_extensions_proto_rawDescData)
|
||||
})
|
||||
return file_api_extensions_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_api_extensions_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_api_extensions_proto_goTypes = []interface{}{
|
||||
(*DefaultEvaluationCriteria)(nil), // 0: openmatch.DefaultEvaluationCriteria
|
||||
}
|
||||
var file_api_extensions_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_api_extensions_proto_init() }
|
||||
func file_api_extensions_proto_init() {
|
||||
if File_api_extensions_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_api_extensions_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DefaultEvaluationCriteria); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_api_extensions_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_api_extensions_proto_goTypes,
|
||||
DependencyIndexes: file_api_extensions_proto_depIdxs,
|
||||
MessageInfos: file_api_extensions_proto_msgTypes,
|
||||
}.Build()
|
||||
File_api_extensions_proto = out.File
|
||||
file_api_extensions_proto_rawDesc = nil
|
||||
file_api_extensions_proto_goTypes = nil
|
||||
file_api_extensions_proto_depIdxs = nil
|
||||
}
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -13,14 +13,14 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
@ -29,7 +29,7 @@ var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = descriptor.ForMessage
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_FrontendService_CreateTicket_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq CreateTicketRequest
|
||||
@ -82,7 +82,6 @@ func request_FrontendService_DeleteTicket_0(ctx context.Context, marshaler runti
|
||||
}
|
||||
|
||||
protoReq.TicketId, err = runtime.String(val)
|
||||
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ticket_id", err)
|
||||
}
|
||||
@ -109,7 +108,6 @@ func local_request_FrontendService_DeleteTicket_0(ctx context.Context, marshaler
|
||||
}
|
||||
|
||||
protoReq.TicketId, err = runtime.String(val)
|
||||
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ticket_id", err)
|
||||
}
|
||||
@ -136,7 +134,6 @@ func request_FrontendService_GetTicket_0(ctx context.Context, marshaler runtime.
|
||||
}
|
||||
|
||||
protoReq.TicketId, err = runtime.String(val)
|
||||
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ticket_id", err)
|
||||
}
|
||||
@ -163,7 +160,6 @@ func local_request_FrontendService_GetTicket_0(ctx context.Context, marshaler ru
|
||||
}
|
||||
|
||||
protoReq.TicketId, err = runtime.String(val)
|
||||
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ticket_id", err)
|
||||
}
|
||||
@ -190,7 +186,6 @@ func request_FrontendService_WatchAssignments_0(ctx context.Context, marshaler r
|
||||
}
|
||||
|
||||
protoReq.TicketId, err = runtime.String(val)
|
||||
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ticket_id", err)
|
||||
}
|
||||
@ -208,21 +203,265 @@ func request_FrontendService_WatchAssignments_0(ctx context.Context, marshaler r
|
||||
|
||||
}
|
||||
|
||||
func request_FrontendService_AcknowledgeBackfill_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq AcknowledgeBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["backfill_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "backfill_id")
|
||||
}
|
||||
|
||||
protoReq.BackfillId, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "backfill_id", err)
|
||||
}
|
||||
|
||||
msg, err := client.AcknowledgeBackfill(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_FrontendService_AcknowledgeBackfill_0(ctx context.Context, marshaler runtime.Marshaler, server FrontendServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq AcknowledgeBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["backfill_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "backfill_id")
|
||||
}
|
||||
|
||||
protoReq.BackfillId, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "backfill_id", err)
|
||||
}
|
||||
|
||||
msg, err := server.AcknowledgeBackfill(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_FrontendService_CreateBackfill_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq CreateBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.CreateBackfill(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_FrontendService_CreateBackfill_0(ctx context.Context, marshaler runtime.Marshaler, server FrontendServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq CreateBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.CreateBackfill(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_FrontendService_DeleteBackfill_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq DeleteBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["backfill_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "backfill_id")
|
||||
}
|
||||
|
||||
protoReq.BackfillId, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "backfill_id", err)
|
||||
}
|
||||
|
||||
msg, err := client.DeleteBackfill(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_FrontendService_DeleteBackfill_0(ctx context.Context, marshaler runtime.Marshaler, server FrontendServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq DeleteBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["backfill_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "backfill_id")
|
||||
}
|
||||
|
||||
protoReq.BackfillId, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "backfill_id", err)
|
||||
}
|
||||
|
||||
msg, err := server.DeleteBackfill(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_FrontendService_GetBackfill_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq GetBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["backfill_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "backfill_id")
|
||||
}
|
||||
|
||||
protoReq.BackfillId, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "backfill_id", err)
|
||||
}
|
||||
|
||||
msg, err := client.GetBackfill(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_FrontendService_GetBackfill_0(ctx context.Context, marshaler runtime.Marshaler, server FrontendServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq GetBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["backfill_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "backfill_id")
|
||||
}
|
||||
|
||||
protoReq.BackfillId, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "backfill_id", err)
|
||||
}
|
||||
|
||||
msg, err := server.GetBackfill(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_FrontendService_UpdateBackfill_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UpdateBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.UpdateBackfill(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_FrontendService_UpdateBackfill_0(ctx context.Context, marshaler runtime.Marshaler, server FrontendServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UpdateBackfillRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.UpdateBackfill(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterFrontendServiceHandlerServer registers the http handlers for service FrontendService to "mux".
|
||||
// UnaryRPC :call FrontendServiceServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterFrontendServiceHandlerFromEndpoint instead.
|
||||
func RegisterFrontendServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server FrontendServiceServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_FrontendService_CreateTicket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/CreateTicket")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_CreateTicket_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
@ -236,13 +475,16 @@ func RegisterFrontendServiceHandlerServer(ctx context.Context, mux *runtime.Serv
|
||||
mux.Handle("DELETE", pattern_FrontendService_DeleteTicket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/DeleteTicket")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_DeleteTicket_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
@ -256,13 +498,16 @@ func RegisterFrontendServiceHandlerServer(ctx context.Context, mux *runtime.Serv
|
||||
mux.Handle("GET", pattern_FrontendService_GetTicket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/GetTicket")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_GetTicket_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
@ -280,6 +525,121 @@ func RegisterFrontendServiceHandlerServer(ctx context.Context, mux *runtime.Serv
|
||||
return
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_FrontendService_AcknowledgeBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/AcknowledgeBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_AcknowledgeBackfill_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_AcknowledgeBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_FrontendService_CreateBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/CreateBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_CreateBackfill_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_CreateBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("DELETE", pattern_FrontendService_DeleteBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/DeleteBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_DeleteBackfill_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_DeleteBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_FrontendService_GetBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/GetBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_GetBackfill_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_GetBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("PATCH", pattern_FrontendService_UpdateBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/openmatch.FrontendService/UpdateBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_FrontendService_UpdateBackfill_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_UpdateBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -325,7 +685,7 @@ func RegisterFrontendServiceHandlerClient(ctx context.Context, mux *runtime.Serv
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/CreateTicket")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -345,7 +705,7 @@ func RegisterFrontendServiceHandlerClient(ctx context.Context, mux *runtime.Serv
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/DeleteTicket")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -365,7 +725,7 @@ func RegisterFrontendServiceHandlerClient(ctx context.Context, mux *runtime.Serv
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/GetTicket")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -385,7 +745,7 @@ func RegisterFrontendServiceHandlerClient(ctx context.Context, mux *runtime.Serv
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/WatchAssignments")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -401,17 +761,127 @@ func RegisterFrontendServiceHandlerClient(ctx context.Context, mux *runtime.Serv
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_FrontendService_AcknowledgeBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/AcknowledgeBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_FrontendService_AcknowledgeBackfill_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_AcknowledgeBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_FrontendService_CreateBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/CreateBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_FrontendService_CreateBackfill_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_CreateBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("DELETE", pattern_FrontendService_DeleteBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/DeleteBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_FrontendService_DeleteBackfill_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_DeleteBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_FrontendService_GetBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/GetBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_FrontendService_GetBackfill_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_GetBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("PATCH", pattern_FrontendService_UpdateBackfill_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.FrontendService/UpdateBackfill")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_FrontendService_UpdateBackfill_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_FrontendService_UpdateBackfill_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_FrontendService_CreateTicket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "frontendservice", "tickets"}, "", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_FrontendService_CreateTicket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "frontendservice", "tickets"}, ""))
|
||||
|
||||
pattern_FrontendService_DeleteTicket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "frontendservice", "tickets", "ticket_id"}, "", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_FrontendService_DeleteTicket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "frontendservice", "tickets", "ticket_id"}, ""))
|
||||
|
||||
pattern_FrontendService_GetTicket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "frontendservice", "tickets", "ticket_id"}, "", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_FrontendService_GetTicket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "frontendservice", "tickets", "ticket_id"}, ""))
|
||||
|
||||
pattern_FrontendService_WatchAssignments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "frontendservice", "tickets", "ticket_id", "assignments"}, "", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_FrontendService_WatchAssignments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "frontendservice", "tickets", "ticket_id", "assignments"}, ""))
|
||||
|
||||
pattern_FrontendService_AcknowledgeBackfill_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "frontendservice", "backfills", "backfill_id", "acknowledge"}, ""))
|
||||
|
||||
pattern_FrontendService_CreateBackfill_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "frontendservice", "backfills"}, ""))
|
||||
|
||||
pattern_FrontendService_DeleteBackfill_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "frontendservice", "backfills", "backfill_id"}, ""))
|
||||
|
||||
pattern_FrontendService_GetBackfill_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "frontendservice", "backfills", "backfill_id"}, ""))
|
||||
|
||||
pattern_FrontendService_UpdateBackfill_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "frontendservice", "backfills"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
@ -422,4 +892,14 @@ var (
|
||||
forward_FrontendService_GetTicket_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_FrontendService_WatchAssignments_0 = runtime.ForwardResponseStream
|
||||
|
||||
forward_FrontendService_AcknowledgeBackfill_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_FrontendService_CreateBackfill_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_FrontendService_DeleteBackfill_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_FrontendService_GetBackfill_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_FrontendService_UpdateBackfill_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
@ -1,177 +1,303 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0-devel
|
||||
// protoc v3.10.1
|
||||
// source: api/matchfunction.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type RunRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// A MatchProfile defines constraints of Tickets in a Match and shapes the Match proposed by the MatchFunction.
|
||||
Profile *MatchProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Profile *MatchProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"`
|
||||
}
|
||||
|
||||
func (m *RunRequest) Reset() { *m = RunRequest{} }
|
||||
func (m *RunRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*RunRequest) ProtoMessage() {}
|
||||
func (x *RunRequest) Reset() {
|
||||
*x = RunRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_matchfunction_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RunRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RunRequest) ProtoMessage() {}
|
||||
|
||||
func (x *RunRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_matchfunction_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RunRequest.ProtoReflect.Descriptor instead.
|
||||
func (*RunRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2b5069a21f149a55, []int{0}
|
||||
return file_api_matchfunction_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *RunRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RunRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RunRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RunRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *RunRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RunRequest.Merge(m, src)
|
||||
}
|
||||
func (m *RunRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_RunRequest.Size(m)
|
||||
}
|
||||
func (m *RunRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RunRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RunRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *RunRequest) GetProfile() *MatchProfile {
|
||||
if m != nil {
|
||||
return m.Profile
|
||||
func (x *RunRequest) GetProfile() *MatchProfile {
|
||||
if x != nil {
|
||||
return x.Profile
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type RunResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// A Proposal represents a Match candidate that satifies the constraints defined in the input Profile.
|
||||
// A valid Proposal response will contain at least one ticket.
|
||||
Proposal *Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Proposal *Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
|
||||
}
|
||||
|
||||
func (m *RunResponse) Reset() { *m = RunResponse{} }
|
||||
func (m *RunResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*RunResponse) ProtoMessage() {}
|
||||
func (x *RunResponse) Reset() {
|
||||
*x = RunResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_matchfunction_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RunResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RunResponse) ProtoMessage() {}
|
||||
|
||||
func (x *RunResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_matchfunction_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RunResponse.ProtoReflect.Descriptor instead.
|
||||
func (*RunResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2b5069a21f149a55, []int{1}
|
||||
return file_api_matchfunction_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (m *RunResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RunResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RunResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RunResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *RunResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RunResponse.Merge(m, src)
|
||||
}
|
||||
func (m *RunResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_RunResponse.Size(m)
|
||||
}
|
||||
func (m *RunResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RunResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RunResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *RunResponse) GetProposal() *Match {
|
||||
if m != nil {
|
||||
return m.Proposal
|
||||
func (x *RunResponse) GetProposal() *Match {
|
||||
if x != nil {
|
||||
return x.Proposal
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*RunRequest)(nil), "openmatch.RunRequest")
|
||||
proto.RegisterType((*RunResponse)(nil), "openmatch.RunResponse")
|
||||
var File_api_matchfunction_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_api_matchfunction_proto_rawDesc = []byte{
|
||||
0x0a, 0x17, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x66, 0x75, 0x6e, 0x63, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x6f, 0x70, 0x65, 0x6e, 0x6d,
|
||||
0x61, 0x74, 0x63, 0x68, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67,
|
||||
0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3f, 0x0a, 0x0a, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x07,
|
||||
0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x3b, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73,
|
||||
0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d,
|
||||
0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70,
|
||||
0x6f, 0x73, 0x61, 0x6c, 0x32, 0x69, 0x0a, 0x0d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x46, 0x75, 0x6e,
|
||||
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x58, 0x0a, 0x03, 0x52, 0x75, 0x6e, 0x12, 0x15, 0x2e, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e,
|
||||
0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x66, 0x75,
|
||||
0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x72, 0x75, 0x6e, 0x3a, 0x01, 0x2a, 0x30, 0x01, 0x42,
|
||||
0x91, 0x03, 0x5a, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64,
|
||||
0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x70, 0x6b,
|
||||
0x67, 0x2f, 0x70, 0x62, 0xaa, 0x02, 0x09, 0x4f, 0x70, 0x65, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
|
||||
0x92, 0x41, 0xdf, 0x02, 0x12, 0xb8, 0x01, 0x0a, 0x0e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x20, 0x46,
|
||||
0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0a, 0x4f, 0x70, 0x65, 0x6e, 0x20,
|
||||
0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x1a, 0x23, 0x6f,
|
||||
0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2d, 0x64, 0x69, 0x73, 0x63, 0x75, 0x73,
|
||||
0x73, 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2a, 0x56, 0x0a, 0x12, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x20, 0x32, 0x2e, 0x30,
|
||||
0x20, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
|
||||
0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x66, 0x6f, 0x72, 0x67, 0x61, 0x6d, 0x65, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
|
||||
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74,
|
||||
0x65, 0x72, 0x2f, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x32, 0x03, 0x31, 0x2e, 0x30, 0x2a,
|
||||
0x02, 0x01, 0x02, 0x32, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x3a, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x52, 0x3b, 0x0a, 0x03, 0x34, 0x30, 0x34, 0x12, 0x34,
|
||||
0x0a, 0x2a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20,
|
||||
0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x64, 0x6f, 0x65,
|
||||
0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x2e, 0x12, 0x06, 0x0a, 0x04,
|
||||
0x9a, 0x02, 0x01, 0x07, 0x72, 0x3d, 0x0a, 0x18, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x4d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x20, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x12, 0x21, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d,
|
||||
0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x73, 0x69, 0x74, 0x65, 0x2f, 0x64, 0x6f,
|
||||
0x63, 0x73, 0x2f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("api/matchfunction.proto", fileDescriptor_2b5069a21f149a55) }
|
||||
var (
|
||||
file_api_matchfunction_proto_rawDescOnce sync.Once
|
||||
file_api_matchfunction_proto_rawDescData = file_api_matchfunction_proto_rawDesc
|
||||
)
|
||||
|
||||
var fileDescriptor_2b5069a21f149a55 = []byte{
|
||||
// 486 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4d, 0x6e, 0x13, 0x31,
|
||||
0x18, 0xd5, 0x4c, 0x50, 0x4b, 0x5d, 0x81, 0x2a, 0x4b, 0xfd, 0x51, 0xc4, 0xc2, 0x84, 0x0d, 0x8a,
|
||||
0x9a, 0x71, 0x1a, 0xba, 0x4a, 0x85, 0x68, 0x81, 0x22, 0x55, 0x2a, 0x3f, 0x1a, 0x24, 0x84, 0xd8,
|
||||
0x39, 0xce, 0xd7, 0x99, 0x81, 0xc4, 0x9f, 0xf1, 0x67, 0xb7, 0xac, 0xb9, 0x01, 0xb0, 0xe3, 0x08,
|
||||
0x9c, 0x80, 0x7b, 0x70, 0x00, 0x36, 0x1c, 0x04, 0x8d, 0xa7, 0x69, 0x42, 0xca, 0x66, 0x46, 0x7e,
|
||||
0xef, 0x7d, 0xef, 0xd9, 0xcf, 0x66, 0xdb, 0xca, 0x56, 0x72, 0xaa, 0xbc, 0x2e, 0xcf, 0x82, 0xd1,
|
||||
0xbe, 0x42, 0x93, 0x59, 0x87, 0x1e, 0xf9, 0x1a, 0x5a, 0x30, 0x91, 0x68, 0xf3, 0xa8, 0x01, 0x22,
|
||||
0x55, 0x00, 0x35, 0x74, 0xfb, 0x4e, 0x81, 0x58, 0x4c, 0x40, 0xd6, 0x94, 0x32, 0x06, 0xbd, 0xaa,
|
||||
0x67, 0x67, 0xec, 0x6e, 0xfc, 0xe9, 0x5e, 0x01, 0xa6, 0x47, 0x17, 0xaa, 0x28, 0xc0, 0x49, 0xb4,
|
||||
0x51, 0x71, 0x5d, 0xdd, 0x79, 0xc4, 0x58, 0x1e, 0x4c, 0x0e, 0x1f, 0x03, 0x90, 0xe7, 0x7b, 0x6c,
|
||||
0xd5, 0x3a, 0x3c, 0xab, 0x26, 0xb0, 0x93, 0x88, 0xe4, 0xfe, 0xfa, 0x60, 0x3b, 0xbb, 0xda, 0x4a,
|
||||
0xf6, 0xbc, 0xfe, 0xbe, 0x6a, 0xe8, 0x7c, 0xa6, 0xeb, 0x1c, 0xb0, 0xf5, 0x68, 0x40, 0x16, 0x0d,
|
||||
0x01, 0xdf, 0x65, 0x37, 0xad, 0x43, 0x8b, 0xa4, 0x26, 0x97, 0x16, 0x1b, 0xcb, 0x16, 0xf9, 0x95,
|
||||
0x62, 0x50, 0xb1, 0x5b, 0x11, 0x7a, 0x76, 0x79, 0x7e, 0xfe, 0x96, 0xb5, 0xf2, 0x60, 0xf8, 0xe6,
|
||||
0xc2, 0xcc, 0x7c, 0x7b, 0xed, 0xad, 0x65, 0xb8, 0x09, 0xed, 0x88, 0xcf, 0xbf, 0xfe, 0x7c, 0x4b,
|
||||
0xdb, 0x9d, 0x4d, 0x79, 0xbe, 0xf7, 0x6f, 0xa1, 0x43, 0x17, 0xcc, 0x30, 0xe9, 0xf6, 0x93, 0xc7,
|
||||
0x5f, 0x5a, 0x5f, 0x8f, 0x7e, 0xa7, 0xfc, 0x67, 0xc2, 0x6e, 0xc7, 0x48, 0x31, 0xcb, 0xec, 0x9c,
|
||||
0x30, 0xf6, 0xd2, 0x82, 0x11, 0x11, 0xe6, 0x5b, 0xa5, 0xf7, 0x96, 0x86, 0x52, 0xd6, 0x51, 0xbd,
|
||||
0x26, 0x6b, 0x0c, 0xe7, 0xed, 0x7b, 0xf3, 0x75, 0x6f, 0x5c, 0x91, 0x0e, 0x44, 0x87, 0xcd, 0x3d,
|
||||
0x14, 0x0e, 0x83, 0xa5, 0x4c, 0xe3, 0xb4, 0xfb, 0x86, 0xf1, 0x23, 0xab, 0x74, 0x09, 0x62, 0x90,
|
||||
0xf5, 0xc5, 0x69, 0xa5, 0xa1, 0xee, 0xe4, 0x70, 0x66, 0x59, 0x54, 0xbe, 0x0c, 0xa3, 0x5a, 0x29,
|
||||
0x9b, 0xd1, 0x33, 0x74, 0x85, 0x9a, 0x02, 0x2d, 0x84, 0xc9, 0xd1, 0x04, 0x47, 0x72, 0xaa, 0xc8,
|
||||
0x83, 0x93, 0xa7, 0x27, 0x4f, 0x8e, 0x5f, 0xbc, 0x3e, 0x1e, 0xb4, 0xf6, 0xb2, 0x7e, 0x37, 0x4d,
|
||||
0xd2, 0xc1, 0x86, 0xb2, 0x76, 0x52, 0xe9, 0x78, 0x85, 0xf2, 0x3d, 0xa1, 0x19, 0x5e, 0x43, 0xf2,
|
||||
0x03, 0xd6, 0xda, 0xef, 0xef, 0xf3, 0x7d, 0xd6, 0xcd, 0xc1, 0x07, 0x67, 0x60, 0x2c, 0x2e, 0x4a,
|
||||
0x30, 0xc2, 0x97, 0x20, 0x1c, 0x10, 0x06, 0xa7, 0x41, 0x8c, 0x11, 0x48, 0x18, 0xf4, 0x02, 0x3e,
|
||||
0x55, 0xe4, 0x33, 0xbe, 0xc2, 0x6e, 0x7c, 0x4f, 0x93, 0x55, 0xf7, 0x90, 0xed, 0xcc, 0xcb, 0x10,
|
||||
0x4f, 0x51, 0x87, 0x29, 0x98, 0xe6, 0xc9, 0xf0, 0xbb, 0xff, 0xaf, 0x46, 0x52, 0xe5, 0x41, 0x8e,
|
||||
0x51, 0x93, 0x7c, 0x27, 0x96, 0xa8, 0x85, 0x73, 0xd9, 0x0f, 0x85, 0xb4, 0xa3, 0x1f, 0xe9, 0x5a,
|
||||
0xed, 0x1f, 0xed, 0x47, 0x2b, 0xf1, 0x0d, 0x3e, 0xf8, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x79, 0x14,
|
||||
0x90, 0xfd, 0x09, 0x03, 0x00, 0x00,
|
||||
func file_api_matchfunction_proto_rawDescGZIP() []byte {
|
||||
file_api_matchfunction_proto_rawDescOnce.Do(func() {
|
||||
file_api_matchfunction_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_matchfunction_proto_rawDescData)
|
||||
})
|
||||
return file_api_matchfunction_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_api_matchfunction_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_api_matchfunction_proto_goTypes = []interface{}{
|
||||
(*RunRequest)(nil), // 0: openmatch.RunRequest
|
||||
(*RunResponse)(nil), // 1: openmatch.RunResponse
|
||||
(*MatchProfile)(nil), // 2: openmatch.MatchProfile
|
||||
(*Match)(nil), // 3: openmatch.Match
|
||||
}
|
||||
var file_api_matchfunction_proto_depIdxs = []int32{
|
||||
2, // 0: openmatch.RunRequest.profile:type_name -> openmatch.MatchProfile
|
||||
3, // 1: openmatch.RunResponse.proposal:type_name -> openmatch.Match
|
||||
0, // 2: openmatch.MatchFunction.Run:input_type -> openmatch.RunRequest
|
||||
1, // 3: openmatch.MatchFunction.Run:output_type -> openmatch.RunResponse
|
||||
3, // [3:4] is the sub-list for method output_type
|
||||
2, // [2:3] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_api_matchfunction_proto_init() }
|
||||
func file_api_matchfunction_proto_init() {
|
||||
if File_api_matchfunction_proto != nil {
|
||||
return
|
||||
}
|
||||
file_api_messages_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_api_matchfunction_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RunRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_api_matchfunction_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RunResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_api_matchfunction_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_api_matchfunction_proto_goTypes,
|
||||
DependencyIndexes: file_api_matchfunction_proto_depIdxs,
|
||||
MessageInfos: file_api_matchfunction_proto_msgTypes,
|
||||
}.Build()
|
||||
File_api_matchfunction_proto = out.File
|
||||
file_api_matchfunction_proto_rawDesc = nil
|
||||
file_api_matchfunction_proto_goTypes = nil
|
||||
file_api_matchfunction_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// MatchFunctionClient is the client API for MatchFunction service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type MatchFunctionClient interface {
|
||||
// DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.
|
||||
// Run pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then
|
||||
// constructs and streams back match candidates to the Backend service.
|
||||
// Run pulls Tickets that satisfy Profile constraints from QueryService,
|
||||
// runs matchmaking logic against them, then constructs and streams back
|
||||
// match candidates to the Backend service.
|
||||
Run(ctx context.Context, in *RunRequest, opts ...grpc.CallOption) (MatchFunction_RunClient, error)
|
||||
}
|
||||
|
||||
type matchFunctionClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewMatchFunctionClient(cc *grpc.ClientConn) MatchFunctionClient {
|
||||
func NewMatchFunctionClient(cc grpc.ClientConnInterface) MatchFunctionClient {
|
||||
return &matchFunctionClient{cc}
|
||||
}
|
||||
|
||||
@ -210,8 +336,9 @@ func (x *matchFunctionRunClient) Recv() (*RunResponse, error) {
|
||||
// MatchFunctionServer is the server API for MatchFunction service.
|
||||
type MatchFunctionServer interface {
|
||||
// DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.
|
||||
// Run pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then
|
||||
// constructs and streams back match candidates to the Backend service.
|
||||
// Run pulls Tickets that satisfy Profile constraints from QueryService,
|
||||
// runs matchmaking logic against them, then constructs and streams back
|
||||
// match candidates to the Backend service.
|
||||
Run(*RunRequest, MatchFunction_RunServer) error
|
||||
}
|
||||
|
||||
@ -219,7 +346,7 @@ type MatchFunctionServer interface {
|
||||
type UnimplementedMatchFunctionServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedMatchFunctionServer) Run(req *RunRequest, srv MatchFunction_RunServer) error {
|
||||
func (*UnimplementedMatchFunctionServer) Run(*RunRequest, MatchFunction_RunServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Run not implemented")
|
||||
}
|
||||
|
||||
|
@ -13,14 +13,14 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
@ -29,7 +29,7 @@ var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = descriptor.ForMessage
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_MatchFunction_Run_0(ctx context.Context, marshaler runtime.Marshaler, client MatchFunctionClient, req *http.Request, pathParams map[string]string) (MatchFunction_RunClient, runtime.ServerMetadata, error) {
|
||||
var protoReq RunRequest
|
||||
@ -59,6 +59,7 @@ func request_MatchFunction_Run_0(ctx context.Context, marshaler runtime.Marshale
|
||||
// RegisterMatchFunctionHandlerServer registers the http handlers for service MatchFunction to "mux".
|
||||
// UnaryRPC :call MatchFunctionServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMatchFunctionHandlerFromEndpoint instead.
|
||||
func RegisterMatchFunctionHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MatchFunctionServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_MatchFunction_Run_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
@ -113,7 +114,7 @@ func RegisterMatchFunctionHandlerClient(ctx context.Context, mux *runtime.ServeM
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.MatchFunction/Run")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -133,7 +134,7 @@ func RegisterMatchFunctionHandlerClient(ctx context.Context, mux *runtime.ServeM
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_MatchFunction_Run_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "matchfunction"}, "run", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_MatchFunction_Run_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "matchfunction"}, "run"))
|
||||
)
|
||||
|
||||
var (
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,248 +1,576 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0-devel
|
||||
// protoc v3.10.1
|
||||
// source: api/query.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
|
||||
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type QueryTicketsRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The Pool representing the set of Filters to be queried.
|
||||
Pool *Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Pool *Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"`
|
||||
}
|
||||
|
||||
func (m *QueryTicketsRequest) Reset() { *m = QueryTicketsRequest{} }
|
||||
func (m *QueryTicketsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*QueryTicketsRequest) ProtoMessage() {}
|
||||
func (x *QueryTicketsRequest) Reset() {
|
||||
*x = QueryTicketsRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_query_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *QueryTicketsRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*QueryTicketsRequest) ProtoMessage() {}
|
||||
|
||||
func (x *QueryTicketsRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_query_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use QueryTicketsRequest.ProtoReflect.Descriptor instead.
|
||||
func (*QueryTicketsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5ec7651f31a90698, []int{0}
|
||||
return file_api_query_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *QueryTicketsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_QueryTicketsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *QueryTicketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_QueryTicketsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *QueryTicketsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_QueryTicketsRequest.Merge(m, src)
|
||||
}
|
||||
func (m *QueryTicketsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_QueryTicketsRequest.Size(m)
|
||||
}
|
||||
func (m *QueryTicketsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_QueryTicketsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_QueryTicketsRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *QueryTicketsRequest) GetPool() *Pool {
|
||||
if m != nil {
|
||||
return m.Pool
|
||||
func (x *QueryTicketsRequest) GetPool() *Pool {
|
||||
if x != nil {
|
||||
return x.Pool
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type QueryTicketsResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Tickets that meet all the filtering criteria requested by the pool.
|
||||
Tickets []*Ticket `protobuf:"bytes,1,rep,name=tickets,proto3" json:"tickets,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Tickets []*Ticket `protobuf:"bytes,1,rep,name=tickets,proto3" json:"tickets,omitempty"`
|
||||
}
|
||||
|
||||
func (m *QueryTicketsResponse) Reset() { *m = QueryTicketsResponse{} }
|
||||
func (m *QueryTicketsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*QueryTicketsResponse) ProtoMessage() {}
|
||||
func (x *QueryTicketsResponse) Reset() {
|
||||
*x = QueryTicketsResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_query_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *QueryTicketsResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*QueryTicketsResponse) ProtoMessage() {}
|
||||
|
||||
func (x *QueryTicketsResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_query_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use QueryTicketsResponse.ProtoReflect.Descriptor instead.
|
||||
func (*QueryTicketsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5ec7651f31a90698, []int{1}
|
||||
return file_api_query_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (m *QueryTicketsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_QueryTicketsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *QueryTicketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_QueryTicketsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *QueryTicketsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_QueryTicketsResponse.Merge(m, src)
|
||||
}
|
||||
func (m *QueryTicketsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_QueryTicketsResponse.Size(m)
|
||||
}
|
||||
func (m *QueryTicketsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_QueryTicketsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_QueryTicketsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *QueryTicketsResponse) GetTickets() []*Ticket {
|
||||
if m != nil {
|
||||
return m.Tickets
|
||||
func (x *QueryTicketsResponse) GetTickets() []*Ticket {
|
||||
if x != nil {
|
||||
return x.Tickets
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type QueryTicketIdsRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The Pool representing the set of Filters to be queried.
|
||||
Pool *Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Pool *Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"`
|
||||
}
|
||||
|
||||
func (m *QueryTicketIdsRequest) Reset() { *m = QueryTicketIdsRequest{} }
|
||||
func (m *QueryTicketIdsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*QueryTicketIdsRequest) ProtoMessage() {}
|
||||
func (x *QueryTicketIdsRequest) Reset() {
|
||||
*x = QueryTicketIdsRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_query_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *QueryTicketIdsRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*QueryTicketIdsRequest) ProtoMessage() {}
|
||||
|
||||
func (x *QueryTicketIdsRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_query_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use QueryTicketIdsRequest.ProtoReflect.Descriptor instead.
|
||||
func (*QueryTicketIdsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5ec7651f31a90698, []int{2}
|
||||
return file_api_query_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (m *QueryTicketIdsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_QueryTicketIdsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *QueryTicketIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_QueryTicketIdsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *QueryTicketIdsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_QueryTicketIdsRequest.Merge(m, src)
|
||||
}
|
||||
func (m *QueryTicketIdsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_QueryTicketIdsRequest.Size(m)
|
||||
}
|
||||
func (m *QueryTicketIdsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_QueryTicketIdsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_QueryTicketIdsRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *QueryTicketIdsRequest) GetPool() *Pool {
|
||||
if m != nil {
|
||||
return m.Pool
|
||||
func (x *QueryTicketIdsRequest) GetPool() *Pool {
|
||||
if x != nil {
|
||||
return x.Pool
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type QueryTicketIdsResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// TicketIDs that meet all the filtering criteria requested by the pool.
|
||||
Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
|
||||
}
|
||||
|
||||
func (m *QueryTicketIdsResponse) Reset() { *m = QueryTicketIdsResponse{} }
|
||||
func (m *QueryTicketIdsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*QueryTicketIdsResponse) ProtoMessage() {}
|
||||
func (x *QueryTicketIdsResponse) Reset() {
|
||||
*x = QueryTicketIdsResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_query_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *QueryTicketIdsResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*QueryTicketIdsResponse) ProtoMessage() {}
|
||||
|
||||
func (x *QueryTicketIdsResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_query_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use QueryTicketIdsResponse.ProtoReflect.Descriptor instead.
|
||||
func (*QueryTicketIdsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5ec7651f31a90698, []int{3}
|
||||
return file_api_query_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (m *QueryTicketIdsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_QueryTicketIdsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *QueryTicketIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_QueryTicketIdsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *QueryTicketIdsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_QueryTicketIdsResponse.Merge(m, src)
|
||||
}
|
||||
func (m *QueryTicketIdsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_QueryTicketIdsResponse.Size(m)
|
||||
}
|
||||
func (m *QueryTicketIdsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_QueryTicketIdsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_QueryTicketIdsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *QueryTicketIdsResponse) GetIds() []string {
|
||||
if m != nil {
|
||||
return m.Ids
|
||||
func (x *QueryTicketIdsResponse) GetIds() []string {
|
||||
if x != nil {
|
||||
return x.Ids
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*QueryTicketsRequest)(nil), "openmatch.QueryTicketsRequest")
|
||||
proto.RegisterType((*QueryTicketsResponse)(nil), "openmatch.QueryTicketsResponse")
|
||||
proto.RegisterType((*QueryTicketIdsRequest)(nil), "openmatch.QueryTicketIdsRequest")
|
||||
proto.RegisterType((*QueryTicketIdsResponse)(nil), "openmatch.QueryTicketIdsResponse")
|
||||
// BETA FEATURE WARNING: This Request messages are not finalized and
|
||||
// still subject to possible change or removal.
|
||||
type QueryBackfillsRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The Pool representing the set of Filters to be queried.
|
||||
Pool *Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"`
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("api/query.proto", fileDescriptor_5ec7651f31a90698) }
|
||||
func (x *QueryBackfillsRequest) Reset() {
|
||||
*x = QueryBackfillsRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_query_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
var fileDescriptor_5ec7651f31a90698 = []byte{
|
||||
// 577 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xdf, 0x4e, 0x13, 0x4f,
|
||||
0x14, 0xc7, 0xb3, 0x5b, 0x02, 0x61, 0xf8, 0xe5, 0x07, 0x8e, 0x42, 0x48, 0x63, 0x70, 0x58, 0x62,
|
||||
0x02, 0xc5, 0xee, 0x94, 0xca, 0x55, 0xd5, 0x04, 0x04, 0x2e, 0x48, 0x8a, 0x7f, 0x8a, 0xf1, 0xc2,
|
||||
0xbb, 0xe9, 0xec, 0x71, 0x3b, 0xd2, 0xce, 0x19, 0x66, 0x66, 0x41, 0x12, 0xaf, 0x8c, 0x4f, 0x20,
|
||||
0x37, 0xc6, 0x47, 0xf0, 0x25, 0x7c, 0x08, 0x5f, 0xc1, 0xf8, 0x1c, 0x66, 0x77, 0x8b, 0x94, 0x7f,
|
||||
0x26, 0x5e, 0xb5, 0x3b, 0xdf, 0xef, 0x39, 0xdf, 0xcf, 0x9c, 0x99, 0x21, 0xd3, 0xc2, 0x28, 0x7e,
|
||||
0x98, 0x81, 0x3d, 0x89, 0x8d, 0x45, 0x8f, 0x74, 0x12, 0x0d, 0xe8, 0x81, 0xf0, 0xb2, 0x57, 0xa5,
|
||||
0xb9, 0x36, 0x00, 0xe7, 0x44, 0x0a, 0xae, 0x94, 0xab, 0x77, 0x53, 0xc4, 0xb4, 0x0f, 0x3c, 0x97,
|
||||
0x84, 0xd6, 0xe8, 0x85, 0x57, 0xa8, 0xcf, 0xd4, 0x07, 0xc5, 0x8f, 0xac, 0xa7, 0xa0, 0xeb, 0xee,
|
||||
0x58, 0xa4, 0x29, 0x58, 0x8e, 0xa6, 0x70, 0x5c, 0x75, 0x47, 0x2d, 0x72, 0xfb, 0x65, 0x9e, 0xfc,
|
||||
0x4a, 0xc9, 0x03, 0xf0, 0xae, 0x03, 0x87, 0x19, 0x38, 0x4f, 0x97, 0xc8, 0x98, 0x41, 0xec, 0xcf,
|
||||
0x07, 0x2c, 0x58, 0x9e, 0x6a, 0x4e, 0xc7, 0x7f, 0x80, 0xe2, 0x17, 0x88, 0xfd, 0x4e, 0x21, 0x46,
|
||||
0x5b, 0xe4, 0xce, 0xc5, 0x5a, 0x67, 0x50, 0x3b, 0xa0, 0xab, 0x64, 0xc2, 0x97, 0x4b, 0xf3, 0x01,
|
||||
0xab, 0x2c, 0x4f, 0x35, 0x6f, 0x8d, 0xd4, 0x97, 0xe6, 0xce, 0x99, 0x23, 0x7a, 0x4c, 0x66, 0x47,
|
||||
0x9a, 0xec, 0x26, 0xff, 0x86, 0x50, 0x23, 0x73, 0x97, 0xab, 0x87, 0x10, 0x33, 0xa4, 0xa2, 0x92,
|
||||
0x12, 0x60, 0xb2, 0x93, 0xff, 0x6d, 0x9e, 0x86, 0xe4, 0xbf, 0xc2, 0xbc, 0x0f, 0xf6, 0x48, 0x49,
|
||||
0xa0, 0x1f, 0x86, 0xdf, 0x43, 0x7e, 0xba, 0x30, 0x92, 0x71, 0xcd, 0x50, 0xaa, 0xf7, 0x6e, 0xd4,
|
||||
0xcb, 0xcc, 0x68, 0xe5, 0xe3, 0x8f, 0x9f, 0xa7, 0xe1, 0x52, 0xb4, 0xc0, 0x8f, 0xd6, 0xca, 0x03,
|
||||
0x75, 0x65, 0x14, 0x1f, 0xee, 0xb6, 0x55, 0x2c, 0xb6, 0x82, 0x5a, 0x23, 0xa0, 0x9f, 0x02, 0xf2,
|
||||
0xff, 0x45, 0x76, 0xca, 0xae, 0x0f, 0x38, 0x1f, 0x4a, 0x75, 0xf1, 0x2f, 0x8e, 0x21, 0xc4, 0x6a,
|
||||
0x01, 0x71, 0x3f, 0x62, 0x37, 0x40, 0xa8, 0x64, 0x14, 0xe3, 0xe9, 0x97, 0xca, 0xe7, 0xcd, 0x5f,
|
||||
0x21, 0xfd, 0x1e, 0x90, 0xd9, 0xbd, 0x3d, 0xd6, 0xc6, 0x54, 0x49, 0xb6, 0xbc, 0x2d, 0xbc, 0x60,
|
||||
0x6d, 0x71, 0x02, 0x76, 0x25, 0xda, 0x25, 0xe4, 0xb9, 0x01, 0xcd, 0xf6, 0xf2, 0x50, 0x3a, 0xd7,
|
||||
0xf3, 0xde, 0xb8, 0x16, 0xe7, 0x39, 0x47, 0xbd, 0x04, 0x49, 0xe0, 0xa8, 0xba, 0x74, 0xfe, 0x5d,
|
||||
0x4f, 0x94, 0x93, 0x99, 0x73, 0x1b, 0xe5, 0x35, 0x4d, 0x2d, 0x66, 0xc6, 0xc5, 0x12, 0x07, 0xb5,
|
||||
0xd7, 0x84, 0x6e, 0x1a, 0x21, 0x7b, 0xc0, 0x9a, 0x71, 0x83, 0xb5, 0x95, 0x84, 0xfc, 0xa4, 0x36,
|
||||
0xce, 0x5a, 0xa6, 0xca, 0xf7, 0xb2, 0x6e, 0xee, 0xe4, 0x65, 0xe9, 0x5b, 0xb4, 0xa9, 0x18, 0x80,
|
||||
0x1b, 0x09, 0xe3, 0xdd, 0x3e, 0x76, 0xf9, 0x40, 0x38, 0x0f, 0x96, 0xb7, 0x77, 0xb7, 0x76, 0x9e,
|
||||
0xed, 0xef, 0x34, 0x2b, 0x6b, 0x71, 0xa3, 0x16, 0x06, 0x61, 0x73, 0x46, 0x18, 0xd3, 0x57, 0xb2,
|
||||
0xb8, 0xe1, 0xfc, 0x9d, 0x43, 0xdd, 0xba, 0xb2, 0xd2, 0x79, 0x44, 0x2a, 0xeb, 0x8d, 0x75, 0xba,
|
||||
0x4e, 0x6a, 0x1d, 0xf0, 0x99, 0xd5, 0x90, 0xb0, 0xe3, 0x1e, 0x68, 0xe6, 0x7b, 0xc0, 0x2c, 0x38,
|
||||
0xcc, 0xac, 0x04, 0x96, 0x20, 0x38, 0xa6, 0xd1, 0x33, 0x78, 0xaf, 0x9c, 0x8f, 0xe9, 0x38, 0x19,
|
||||
0xfb, 0x1a, 0x06, 0x13, 0xf6, 0x09, 0x99, 0x3f, 0x1f, 0x06, 0xdb, 0x46, 0x99, 0x0d, 0x40, 0x97,
|
||||
0x2f, 0x8a, 0x2e, 0x5e, 0x3f, 0x1a, 0xee, 0x94, 0x07, 0x9e, 0xa0, 0x74, 0xfc, 0x0d, 0xbb, 0x24,
|
||||
0x8d, 0xec, 0xcb, 0x1c, 0xa4, 0xdc, 0x74, 0xbf, 0x85, 0x93, 0x79, 0xff, 0xa2, 0x7d, 0x77, 0xbc,
|
||||
0x78, 0xa2, 0x0f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x69, 0xc6, 0x96, 0x66, 0x20, 0x04, 0x00,
|
||||
0x00,
|
||||
func (x *QueryBackfillsRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*QueryBackfillsRequest) ProtoMessage() {}
|
||||
|
||||
func (x *QueryBackfillsRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_query_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use QueryBackfillsRequest.ProtoReflect.Descriptor instead.
|
||||
func (*QueryBackfillsRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_query_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *QueryBackfillsRequest) GetPool() *Pool {
|
||||
if x != nil {
|
||||
return x.Pool
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BETA FEATURE WARNING: This Request messages are not finalized and
|
||||
// still subject to possible change or removal.
|
||||
type QueryBackfillsResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Backfills that meet all the filtering criteria requested by the pool.
|
||||
Backfills []*Backfill `protobuf:"bytes,1,rep,name=backfills,proto3" json:"backfills,omitempty"`
|
||||
}
|
||||
|
||||
func (x *QueryBackfillsResponse) Reset() {
|
||||
*x = QueryBackfillsResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_api_query_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *QueryBackfillsResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*QueryBackfillsResponse) ProtoMessage() {}
|
||||
|
||||
func (x *QueryBackfillsResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_query_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use QueryBackfillsResponse.ProtoReflect.Descriptor instead.
|
||||
func (*QueryBackfillsResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_query_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *QueryBackfillsResponse) GetBackfills() []*Backfill {
|
||||
if x != nil {
|
||||
return x.Backfills
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_api_query_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_api_query_proto_rawDesc = []byte{
|
||||
0x0a, 0x0f, 0x61, 0x70, 0x69, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x12, 0x09, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x12, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
|
||||
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61,
|
||||
0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e,
|
||||
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3a,
|
||||
0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e,
|
||||
0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x22, 0x43, 0x0a, 0x14, 0x51, 0x75,
|
||||
0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x2b, 0x0a, 0x07, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20,
|
||||
0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e,
|
||||
0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x22,
|
||||
0x3c, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x04, 0x70, 0x6f, 0x6f, 0x6c,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x22, 0x2a, 0x0a,
|
||||
0x16, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x3c, 0x0a, 0x15, 0x51, 0x75, 0x65,
|
||||
0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x12, 0x23, 0x0a, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x50, 0x6f, 0x6f,
|
||||
0x6c, 0x52, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x22, 0x4b, 0x0a, 0x16, 0x51, 0x75, 0x65, 0x72, 0x79,
|
||||
0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x31, 0x0a, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68,
|
||||
0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x66,
|
||||
0x69, 0x6c, 0x6c, 0x73, 0x32, 0x9a, 0x03, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65,
|
||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69,
|
||||
0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63,
|
||||
0x68, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
||||
0x2f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x3a, 0x01,
|
||||
0x2a, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63,
|
||||
0x6b, 0x65, 0x74, 0x49, 0x64, 0x73, 0x12, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d,
|
||||
0x61, 0x74, 0x63, 0x68, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74,
|
||||
0x49, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x25, 0x22, 0x20, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65,
|
||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x69, 0x64, 0x73, 0x3a,
|
||||
0x71, 0x75, 0x65, 0x72, 0x79, 0x3a, 0x01, 0x2a, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0e, 0x51,
|
||||
0x75, 0x65, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x73, 0x12, 0x20, 0x2e,
|
||||
0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x42,
|
||||
0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x51, 0x75, 0x65, 0x72,
|
||||
0x79, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x20, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x62, 0x61, 0x63,
|
||||
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x3a, 0x01, 0x2a, 0x30,
|
||||
0x01, 0x42, 0x98, 0x03, 0x5a, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68,
|
||||
0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f,
|
||||
0x70, 0x6b, 0x67, 0x2f, 0x70, 0x62, 0xaa, 0x02, 0x09, 0x4f, 0x70, 0x65, 0x6e, 0x4d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x92, 0x41, 0xe6, 0x02, 0x12, 0xbf, 0x01, 0x0a, 0x15, 0x4d, 0x4d, 0x20, 0x4c, 0x6f,
|
||||
0x67, 0x69, 0x63, 0x20, 0x28, 0x44, 0x61, 0x74, 0x61, 0x20, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x29,
|
||||
0x22, 0x49, 0x0a, 0x0a, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16,
|
||||
0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x1a, 0x23, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74,
|
||||
0x63, 0x68, 0x2d, 0x64, 0x69, 0x73, 0x63, 0x75, 0x73, 0x73, 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2a, 0x56, 0x0a, 0x12, 0x41,
|
||||
0x70, 0x61, 0x63, 0x68, 0x65, 0x20, 0x32, 0x2e, 0x30, 0x20, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x40, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x66, 0x6f, 0x72, 0x67,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f,
|
||||
0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x4c, 0x49, 0x43, 0x45,
|
||||
0x4e, 0x53, 0x45, 0x32, 0x03, 0x31, 0x2e, 0x30, 0x2a, 0x02, 0x01, 0x02, 0x32, 0x10, 0x61, 0x70,
|
||||
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x3a, 0x10,
|
||||
0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x73, 0x6f, 0x6e,
|
||||
0x52, 0x3b, 0x0a, 0x03, 0x34, 0x30, 0x34, 0x12, 0x34, 0x0a, 0x2a, 0x52, 0x65, 0x74, 0x75, 0x72,
|
||||
0x6e, 0x65, 0x64, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73,
|
||||
0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x64, 0x6f, 0x65, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65,
|
||||
0x78, 0x69, 0x73, 0x74, 0x2e, 0x12, 0x06, 0x0a, 0x04, 0x9a, 0x02, 0x01, 0x07, 0x72, 0x3d, 0x0a,
|
||||
0x18, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x20, 0x44, 0x6f, 0x63, 0x75,
|
||||
0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x68, 0x74, 0x74, 0x70, 0x73,
|
||||
0x3a, 0x2f, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65,
|
||||
0x76, 0x2f, 0x73, 0x69, 0x74, 0x65, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2f, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_api_query_proto_rawDescOnce sync.Once
|
||||
file_api_query_proto_rawDescData = file_api_query_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_api_query_proto_rawDescGZIP() []byte {
|
||||
file_api_query_proto_rawDescOnce.Do(func() {
|
||||
file_api_query_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_query_proto_rawDescData)
|
||||
})
|
||||
return file_api_query_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_api_query_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_api_query_proto_goTypes = []interface{}{
|
||||
(*QueryTicketsRequest)(nil), // 0: openmatch.QueryTicketsRequest
|
||||
(*QueryTicketsResponse)(nil), // 1: openmatch.QueryTicketsResponse
|
||||
(*QueryTicketIdsRequest)(nil), // 2: openmatch.QueryTicketIdsRequest
|
||||
(*QueryTicketIdsResponse)(nil), // 3: openmatch.QueryTicketIdsResponse
|
||||
(*QueryBackfillsRequest)(nil), // 4: openmatch.QueryBackfillsRequest
|
||||
(*QueryBackfillsResponse)(nil), // 5: openmatch.QueryBackfillsResponse
|
||||
(*Pool)(nil), // 6: openmatch.Pool
|
||||
(*Ticket)(nil), // 7: openmatch.Ticket
|
||||
(*Backfill)(nil), // 8: openmatch.Backfill
|
||||
}
|
||||
var file_api_query_proto_depIdxs = []int32{
|
||||
6, // 0: openmatch.QueryTicketsRequest.pool:type_name -> openmatch.Pool
|
||||
7, // 1: openmatch.QueryTicketsResponse.tickets:type_name -> openmatch.Ticket
|
||||
6, // 2: openmatch.QueryTicketIdsRequest.pool:type_name -> openmatch.Pool
|
||||
6, // 3: openmatch.QueryBackfillsRequest.pool:type_name -> openmatch.Pool
|
||||
8, // 4: openmatch.QueryBackfillsResponse.backfills:type_name -> openmatch.Backfill
|
||||
0, // 5: openmatch.QueryService.QueryTickets:input_type -> openmatch.QueryTicketsRequest
|
||||
2, // 6: openmatch.QueryService.QueryTicketIds:input_type -> openmatch.QueryTicketIdsRequest
|
||||
4, // 7: openmatch.QueryService.QueryBackfills:input_type -> openmatch.QueryBackfillsRequest
|
||||
1, // 8: openmatch.QueryService.QueryTickets:output_type -> openmatch.QueryTicketsResponse
|
||||
3, // 9: openmatch.QueryService.QueryTicketIds:output_type -> openmatch.QueryTicketIdsResponse
|
||||
5, // 10: openmatch.QueryService.QueryBackfills:output_type -> openmatch.QueryBackfillsResponse
|
||||
8, // [8:11] is the sub-list for method output_type
|
||||
5, // [5:8] is the sub-list for method input_type
|
||||
5, // [5:5] is the sub-list for extension type_name
|
||||
5, // [5:5] is the sub-list for extension extendee
|
||||
0, // [0:5] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_api_query_proto_init() }
|
||||
func file_api_query_proto_init() {
|
||||
if File_api_query_proto != nil {
|
||||
return
|
||||
}
|
||||
file_api_messages_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_api_query_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*QueryTicketsRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_api_query_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*QueryTicketsResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_api_query_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*QueryTicketIdsRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_api_query_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*QueryTicketIdsResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_api_query_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*QueryBackfillsRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_api_query_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*QueryBackfillsResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_api_query_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 6,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_api_query_proto_goTypes,
|
||||
DependencyIndexes: file_api_query_proto_depIdxs,
|
||||
MessageInfos: file_api_query_proto_msgTypes,
|
||||
}.Build()
|
||||
File_api_query_proto = out.File
|
||||
file_api_query_proto_rawDesc = nil
|
||||
file_api_query_proto_goTypes = nil
|
||||
file_api_query_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// QueryServiceClient is the client API for QueryService service.
|
||||
//
|
||||
@ -258,13 +586,17 @@ type QueryServiceClient interface {
|
||||
// QueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.
|
||||
// - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.
|
||||
QueryTicketIds(ctx context.Context, in *QueryTicketIdsRequest, opts ...grpc.CallOption) (QueryService_QueryTicketIdsClient, error)
|
||||
// QueryBackfills gets a list of Backfills.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
QueryBackfills(ctx context.Context, in *QueryBackfillsRequest, opts ...grpc.CallOption) (QueryService_QueryBackfillsClient, error)
|
||||
}
|
||||
|
||||
type queryServiceClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewQueryServiceClient(cc *grpc.ClientConn) QueryServiceClient {
|
||||
func NewQueryServiceClient(cc grpc.ClientConnInterface) QueryServiceClient {
|
||||
return &queryServiceClient{cc}
|
||||
}
|
||||
|
||||
@ -332,6 +664,38 @@ func (x *queryServiceQueryTicketIdsClient) Recv() (*QueryTicketIdsResponse, erro
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *queryServiceClient) QueryBackfills(ctx context.Context, in *QueryBackfillsRequest, opts ...grpc.CallOption) (QueryService_QueryBackfillsClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_QueryService_serviceDesc.Streams[2], "/openmatch.QueryService/QueryBackfills", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &queryServiceQueryBackfillsClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type QueryService_QueryBackfillsClient interface {
|
||||
Recv() (*QueryBackfillsResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type queryServiceQueryBackfillsClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *queryServiceQueryBackfillsClient) Recv() (*QueryBackfillsResponse, error) {
|
||||
m := new(QueryBackfillsResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// QueryServiceServer is the server API for QueryService service.
|
||||
type QueryServiceServer interface {
|
||||
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
|
||||
@ -344,18 +708,25 @@ type QueryServiceServer interface {
|
||||
// QueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.
|
||||
// - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.
|
||||
QueryTicketIds(*QueryTicketIdsRequest, QueryService_QueryTicketIdsServer) error
|
||||
// QueryBackfills gets a list of Backfills.
|
||||
// BETA FEATURE WARNING: This call and the associated Request and Response
|
||||
// messages are not finalized and still subject to possible change or removal.
|
||||
QueryBackfills(*QueryBackfillsRequest, QueryService_QueryBackfillsServer) error
|
||||
}
|
||||
|
||||
// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedQueryServiceServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedQueryServiceServer) QueryTickets(req *QueryTicketsRequest, srv QueryService_QueryTicketsServer) error {
|
||||
func (*UnimplementedQueryServiceServer) QueryTickets(*QueryTicketsRequest, QueryService_QueryTicketsServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method QueryTickets not implemented")
|
||||
}
|
||||
func (*UnimplementedQueryServiceServer) QueryTicketIds(req *QueryTicketIdsRequest, srv QueryService_QueryTicketIdsServer) error {
|
||||
func (*UnimplementedQueryServiceServer) QueryTicketIds(*QueryTicketIdsRequest, QueryService_QueryTicketIdsServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method QueryTicketIds not implemented")
|
||||
}
|
||||
func (*UnimplementedQueryServiceServer) QueryBackfills(*QueryBackfillsRequest, QueryService_QueryBackfillsServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method QueryBackfills not implemented")
|
||||
}
|
||||
|
||||
func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) {
|
||||
s.RegisterService(&_QueryService_serviceDesc, srv)
|
||||
@ -403,6 +774,27 @@ func (x *queryServiceQueryTicketIdsServer) Send(m *QueryTicketIdsResponse) error
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _QueryService_QueryBackfills_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(QueryBackfillsRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(QueryServiceServer).QueryBackfills(m, &queryServiceQueryBackfillsServer{stream})
|
||||
}
|
||||
|
||||
type QueryService_QueryBackfillsServer interface {
|
||||
Send(*QueryBackfillsResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type queryServiceQueryBackfillsServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *queryServiceQueryBackfillsServer) Send(m *QueryBackfillsResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
var _QueryService_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "openmatch.QueryService",
|
||||
HandlerType: (*QueryServiceServer)(nil),
|
||||
@ -418,6 +810,11 @@ var _QueryService_serviceDesc = grpc.ServiceDesc{
|
||||
Handler: _QueryService_QueryTicketIds_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "QueryBackfills",
|
||||
Handler: _QueryService_QueryBackfills_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "api/query.proto",
|
||||
}
|
||||
|
@ -13,14 +13,14 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
@ -29,7 +29,7 @@ var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = descriptor.ForMessage
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_QueryService_QueryTickets_0(ctx context.Context, marshaler runtime.Marshaler, client QueryServiceClient, req *http.Request, pathParams map[string]string) (QueryService_QueryTicketsClient, runtime.ServerMetadata, error) {
|
||||
var protoReq QueryTicketsRequest
|
||||
@ -81,9 +81,35 @@ func request_QueryService_QueryTicketIds_0(ctx context.Context, marshaler runtim
|
||||
|
||||
}
|
||||
|
||||
func request_QueryService_QueryBackfills_0(ctx context.Context, marshaler runtime.Marshaler, client QueryServiceClient, req *http.Request, pathParams map[string]string) (QueryService_QueryBackfillsClient, runtime.ServerMetadata, error) {
|
||||
var protoReq QueryBackfillsRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
stream, err := client.QueryBackfills(ctx, &protoReq)
|
||||
if err != nil {
|
||||
return nil, metadata, err
|
||||
}
|
||||
header, err := stream.Header()
|
||||
if err != nil {
|
||||
return nil, metadata, err
|
||||
}
|
||||
metadata.HeaderMD = header
|
||||
return stream, metadata, nil
|
||||
|
||||
}
|
||||
|
||||
// RegisterQueryServiceHandlerServer registers the http handlers for service QueryService to "mux".
|
||||
// UnaryRPC :call QueryServiceServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryServiceHandlerFromEndpoint instead.
|
||||
func RegisterQueryServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServiceServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_QueryService_QueryTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
@ -100,6 +126,13 @@ func RegisterQueryServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu
|
||||
return
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_QueryService_QueryBackfills_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
|
||||
_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -145,7 +178,7 @@ func RegisterQueryServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.QueryService/QueryTickets")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -165,7 +198,7 @@ func RegisterQueryServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.QueryService/QueryTicketIds")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@ -181,17 +214,41 @@ func RegisterQueryServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_QueryService_QueryBackfills_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/openmatch.QueryService/QueryBackfills")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_QueryService_QueryBackfills_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_QueryService_QueryBackfills_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_QueryService_QueryTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "queryservice", "tickets"}, "query", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_QueryService_QueryTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "queryservice", "tickets"}, "query"))
|
||||
|
||||
pattern_QueryService_QueryTicketIds_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "queryservice", "ticketids"}, "query", runtime.AssumeColonVerbOpt(true)))
|
||||
pattern_QueryService_QueryTicketIds_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "queryservice", "ticketids"}, "query"))
|
||||
|
||||
pattern_QueryService_QueryBackfills_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "queryservice", "backfills"}, "query"))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_QueryService_QueryTickets_0 = runtime.ForwardResponseStream
|
||||
|
||||
forward_QueryService_QueryTicketIds_0 = runtime.ForwardResponseStream
|
||||
|
||||
forward_QueryService_QueryBackfills_0 = runtime.ForwardResponseStream
|
||||
)
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user