Compare commits


68 Commits

Author SHA1 Message Date
1d4d38f049 Revert "Release 1.2 (#1364)"
This reverts commit 9b2c5de4a4eca840e4f5483ff99350936b897c88.
2021-04-02 18:02:37 -04:00
9b2c5de4a4 Release 1.2 (#1364)
Release 1.2
2021-03-30 17:48:34 -04:00
ad1ca16218 Add string err comparisons to backfill e2e (#1344)
Make failure output more readable.
2021-01-20 17:00:56 +03:00
7d849f3f04 Backfill: Skip not found errors on Backend (#1341)
Backfill: Skip not found errors on Backend
There could be a case where a backfill returned by the MMF was already deleted
by CleanupBackfills.
Add a unit test to check that the error is skipped.
2021-01-20 01:30:01 +03:00
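For context, a minimal Go sketch of the skip-not-found pattern this commit describes, assuming the statestore surfaces missing backfills as gRPC NotFound errors; the function and the update callback are illustrative, not the actual Open Match internals:

```go
package backend

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// syncProposedBackfills applies MMF-proposed backfill updates, treating
// NotFound as a non-error: the backfill may already have been removed
// by CleanupBackfills between the MMF run and this call.
func syncProposedBackfills(ctx context.Context, update func(context.Context, string) error, ids []string) error {
	for _, id := range ids {
		if err := update(ctx, id); err != nil {
			if status.Code(err) == codes.NotFound {
				continue // already gone; skip instead of failing the cycle
			}
			return fmt.Errorf("update backfill %s: %w", id, err)
		}
	}
	return nil
}
```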
05c8c8aa76 Fix leftover after 1080 PR (#1342) 2021-01-19 12:48:15 -08:00
f50c9eec80 Minor fixing some typos (#1343) 2021-01-19 11:26:17 -08:00
c6f23f01ca Improve proto comments (#1340) 2021-01-19 11:05:21 -08:00
21efdb6691 Move Cleanup Backfills after main SynchronizerCycle & add workers pool (#1334)
Use workers in the cleanup process. Move backfill cleanup to the end of the sync cycle.
TestCleanUpExpiredBackfills now calls FetchMatches twice.
2021-01-19 10:43:08 +03:00
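A sketch of the worker-pool shape this commit describes; the pool size, function names, and error handling are assumptions for illustration:

```go
package statestore

import (
	"context"
	"sync"
)

// deleteExpiredBackfills fans expired backfill IDs out to a fixed pool
// of workers instead of deleting them sequentially.
func deleteExpiredBackfills(ctx context.Context, del func(context.Context, string) error, ids []string, workers int) {
	jobs := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range jobs {
				_ = del(ctx, id) // the real service would log failures
			}
		}()
	}
	for _, id := range ids {
		jobs <- id
	}
	close(jobs)
	wg.Wait()
}
```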
81a1dc38b6 add fix in helm chart to use custom redis instance (#1330)
Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2021-01-18 16:52:22 +03:00
d0ddf22658 Expired backfills can not be updated or acknowledged (#1335)
* do not acknowledge expired backfills

* use NoError

* parse ZSCORE response to float, not to int

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2021-01-18 15:48:54 +03:00
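The ZSCORE fix above, sketched assuming the redigo client: ZSCORE replies with a string-encoded score that must be parsed as a float, not an int. The sorted-set key name is a placeholder:

```go
package statestore

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

// lastAcknowledged reads a backfill's last-acknowledged timestamp from a
// sorted set. Parsing the score as an int fails for fractional values,
// so redis.Float64 is used instead.
func lastAcknowledged(conn redis.Conn, backfillID string) (float64, error) {
	// "backfill_last_ack" is a placeholder key name.
	score, err := redis.Float64(conn.Do("ZSCORE", "backfill_last_ack", backfillID))
	if err != nil {
		return 0, fmt.Errorf("ZSCORE failed for backfill %s: %w", backfillID, err)
	}
	return score, nil
}
```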
ee247c6c1a Updated release steps. Added additional step to publish release notes to OM Blog (#1338) 2021-01-15 17:26:56 -05:00
a17eb3bc72 Fix proto comments for better markdown output (#1331) 2021-01-15 11:26:14 -08:00
3d194f541e Add help comments in Makefile (#1332)
* Add help comments in Makefile

* Delete utilities subtitle

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>

* Reorder subtitle definition

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2021-01-14 16:43:51 +03:00
3a0cd7611b Move the Redis chart to bitnami as update to 12.3.3 (#1315) 2021-01-12 11:58:52 -08:00
c13b461795 Make redis lock expiration configurable (#1325) 2021-01-07 13:29:27 -08:00
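A hedged sketch of what a configurable Redis lock expiration can look like, using redigo's SET NX PX form; this is illustrative, not Open Match's actual locking code:

```go
package statestore

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

// tryLock takes a best-effort lock whose TTL comes from configuration,
// so a crashed holder cannot keep the lock past the expiration.
func tryLock(conn redis.Conn, key string, ttl time.Duration) (bool, error) {
	// SET key value NX PX ms: set only if absent, with an expiry.
	_, err := redis.String(conn.Do("SET", key, "locked", "NX", "PX", ttl.Milliseconds()))
	if err == redis.ErrNil {
		return false, nil // another process holds the lock
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```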
b9e55fc727 Add pod tolerations, nodeSelector and affinity in helm for subcharts (#1311)
Fix #1015

Co-authored-by: Scott Redig <sredig@google.com>
2021-01-07 13:05:30 -08:00
dd1386a55b Clean up expired backfills (#1297)
Add `CleanupBackfills()` call to synchronizer.
Put the delete-backfill logic into the statestore.
Add a mutex to DeleteBackfillCompletely and update the deleteBackfill test.
Remove the goroutine.
* Use a new context in CleanupBackfills().
* Move cleanup to the start of the Synchronizer sync cycle.
Co-authored-by: Alexander <alexander.apalikov@globant.com>
2020-12-30 15:41:33 +03:00
defac9065b Frontend acknowledge backfill (#1293)
* Frontend: Add AcknowledgeBackfill method
Update Tickets associated with the backfill, and remove all assigned ones.

Add a mutex lock, and call UpdateBackfill accordingly after UpdateAssignments.
The new function name, doUpdateAcknowledgmentTimestamp, is more fitting since
it only performs Redis timestamp updates.

* Add Generation autoincrement test in test helper func
Add more logic, as in the doAssign() function.
Deindex tickets and add error logging for all NotFound tickets.
2020-12-28 16:37:29 +03:00
f203384fbf Add comments to MMF backfill example (#1320)
Add comments to MatchMaking Function with Backfill example.
It creates matches with Backfills first, then full 1v1 matches, and if a single player is left over, it creates a match with a new Backfill in it.

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-22 22:14:53 +03:00
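A condensed Go sketch of the flow described above; the 1v1 sizing comes from the commit message, while the types and helper are purely illustrative:

```go
package mmf

// makeMatches sketches the example's flow: fill existing backfills
// first, then build full 1v1 matches, and if a single player is left,
// start a match with a fresh backfill for the empty slot.
func makeMatches(backfillIDs, players []string) (matches [][]string, needNewBackfill bool) {
	// 1. Put one waiting player into each existing backfill slot.
	for _, b := range backfillIDs {
		if len(players) == 0 {
			break
		}
		matches = append(matches, []string{b, players[0]})
		players = players[1:]
	}
	// 2. Pair the remaining players into full 1v1 matches.
	for len(players) >= 2 {
		matches = append(matches, players[:2])
		players = players[2:]
	}
	// 3. A single leftover player gets a match with a new backfill.
	if len(players) == 1 {
		matches = append(matches, players)
		needNewBackfill = true
	}
	return matches, needNewBackfill
}
```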
7ef9c052bd Update backend service (#1318)
Add missing Backfill indexing on Create or Update Backfill on the backend.
Release tickets when a backfill generation mismatch happens.
Refactored: new doRelease() function for tickets.

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-22 21:43:00 +03:00
ea744b8b51 Fix install-scale-chart target (#1322)
* Implement backfill querying

* Update location for stable and incubator charts

* Add MMF backfill example

* Simplify MMF backfill example

* Render jaeger configuration if it is enabled

Helm fails to install the open-match chart with Jaeger disabled because it cannot find the
openmatch.jaeger.agent template, which is declared in the jaeger subchart. Helm cannot
find that template because the jaeger subchart is never loaded: it is marked as disabled
in the open-match chart's dependencies.

* Update install-scale-chart target

Currently the open-match-scale subchart is installed separately from the open-match chart, but the two are
tightly coupled: pods declared in the scale subchart depend on service accounts and config maps provisioned
by the open-match chart. Helm therefore renders incorrect service account and config map names. This is
fixed by specifying explicit names in the install-scale-chart target.

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-22 21:14:27 +03:00
1a8fc62833 add @sawagh to codeowners (#1319) 2020-12-21 20:51:50 +03:00
1d5574b8a3 MMF backfill example (#1317)
* Implement backfill querying

* Update location for stable and incubator charts

* Add MMF backfill example

* Simplify MMF backfill example

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-18 18:28:32 +03:00
75a3d43477 Fix typo (#1305)
And trigger e2e-cluster tests on master.
2020-12-18 12:30:34 +03:00
252fc8090d Backfill: Autoincrement generation on every Backfill update (#1308)
* Backfill: Autoincrement generation on every Backfill update

For the Backfill Cache to work in QueryBackfill, every update should
store the backfill with a new generation.

In the future, Generation could be renamed to a Version field on Backfill,
one change at a time.

* Update Generation on Backend and Frontend Updates

No updates on AcknowledgeBackfill.

* Fix tests after merging master

Set the initial Generation to 1 everywhere - on CreateBackfill from both Backend
and Frontend.

* Add missing license header
2020-12-17 18:12:24 +03:00
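A sketch of the generation rule these commits describe: every successful write bumps Generation, and a writer holding a stale snapshot is rejected. All names here are illustrative:

```go
package statestore

import "errors"

// Backfill mirrors just the fields this sketch needs.
type Backfill struct {
	ID         string
	Generation int64
}

var errStaleBackfill = errors.New("backfill generation mismatch")

// applyUpdate rejects a write based on an outdated snapshot and bumps
// the generation on success, so the backfill cache in the query path
// can tell that the stored object changed.
func applyUpdate(stored, update *Backfill) error {
	if update.Generation != stored.Generation {
		return errStaleBackfill // caller should re-read and retry
	}
	stored.Generation++ // every successful update is a new generation
	return nil
}
```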
2c617f2cb6 Update location for stable and incubator charts (#1314)
* Implement backfill querying

* Update location for stable and incubator charts
2020-12-17 13:36:16 +03:00
fcd590eca6 Implement backfill querying (#1310) 2020-12-16 23:15:51 +03:00
4b3147511b create CODEOWNERS
list of those with review perms for easy PR review notifications
2020-12-14 15:29:58 -08:00
c85af44567 Frontend: UpdateBackfill and DeleteBackfill handlers (#1292) 2020-12-03 10:58:24 -08:00
688262111d Create, Update backfill after MMF run (#1299) 2020-12-02 17:51:27 -08:00
26d1aa236a Redis: Backfill last acknowledged (#1288) 2020-12-01 21:52:52 -08:00
fff37cd82c Update autogenerated protobuf files and Swagger for Frontend (#1295) 2020-11-30 10:26:34 -08:00
98a227b515 Update go.sum (#1296) 2020-11-30 09:59:35 -08:00
88cd95fe57 Backfill indexing (#1290) 2020-11-29 22:16:09 -08:00
248494c04c Frontend Create Backfill (#1279) 2020-11-25 22:54:56 -08:00
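Together these commits add the frontend backfill API. A hedged sketch of calling it from a Go client, using Open Match's public pb package; the address and port are placeholders:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Placeholder address; point this at your om-frontend service.
	conn, err := grpc.Dial("open-match-frontend:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	fe := pb.NewFrontendServiceClient(conn)
	// CreateBackfill takes an initial Backfill; Open Match assigns the ID.
	bf, err := fe.CreateBackfill(context.Background(), &pb.CreateBackfillRequest{
		Backfill: &pb.Backfill{},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created backfill %s (generation %d)", bf.Id, bf.Generation)
}
```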
aa4398e786 Improve comments for RPC funcs (#1287) 2020-11-23 11:25:28 -08:00
fc5c3629e8 fixed the wrong spelling (#1291) 2020-11-23 09:43:30 -08:00
8d86709632 Makefile update to make api/api.md target commands universal across various environments (#1283) 2020-11-19 22:22:54 -08:00
0a273674b9 Update supported gke version for create-gke-cluster target (#1289) 2020-11-19 22:05:39 -08:00
e2247a7f53 Add Backfill support to internal statestore (#1273) 2020-11-19 14:20:55 -08:00
b269896c23 Undo change that I shouldn't have been able to do 2020-11-16 16:35:07 -08:00
a210185098 Testing change to build system, DO NOT SUBMIT 2020-11-16 16:33:49 -08:00
4df95deb54 Added test for unavailable gRPC function (#1282) 2020-11-12 21:29:40 -08:00
a9b8eec9e0 Updating the dependencies for the project (#1281)
* Updated dependencies
* Updated tutorials dependencies
* Updated tests
2020-11-12 15:21:07 -05:00
afa59327a4 Add ability to filter backfills (#1278) 2020-11-08 21:57:57 -08:00
d86b6c5121 Add comments when displaying makefile usage (#1276) 2020-11-03 10:50:33 -08:00
2eb2921914 Adding Exclude property to DoubleRangeFilter and test coverage. (#1268) 2020-11-02 13:46:09 -08:00
80d882b7c7 Consider backfill's id when de-colliding matches (#1277) 2020-11-02 11:53:27 -08:00
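A sketch of what that de-collision change implies: a proposal is dropped if any of its ticket IDs, or now its backfill ID, was already claimed by an earlier proposal. The types here are illustrative, not the default evaluator's actual code:

```go
package evaluator

// proposal is a pared-down match proposal for this sketch.
type proposal struct {
	TicketIDs  []string
	BackfillID string // empty when the match carries no backfill
}

// deCollide keeps the first proposal claiming each ticket or backfill;
// later proposals that touch any already-claimed ID are dropped.
func deCollide(proposals []proposal) []proposal {
	claimed := map[string]bool{}
	kept := make([]proposal, 0, len(proposals))
	for _, p := range proposals {
		ids := append([]string{}, p.TicketIDs...)
		if p.BackfillID != "" {
			ids = append(ids, p.BackfillID)
		}
		collides := false
		for _, id := range ids {
			if claimed[id] {
				collides = true
				break
			}
		}
		if collides {
			continue
		}
		for _, id := range ids {
			claimed[id] = true
		}
		kept = append(kept, p)
	}
	return kept
}
```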
0f34e31778 New fields in protobuf definitions (#1272) 2020-10-30 11:45:51 -07:00
d45eb74510 Revert "Unavailable gRPC match functions forces us to wait the proposalCollectionInterval before failing (#1271)" (#1275)
This reverts commit 1765ab7b7e8fcc24015f5c40938c661f82bdbc9a.
2020-10-29 11:58:32 -07:00
1765ab7b7e Unavailable gRPC match functions forces us to wait the proposalCollectionInterval before failing (#1271) 2020-10-28 11:30:30 -07:00
6f05e526fb Improved tests for statestore - redis (#1264) 2020-10-12 19:21:51 -07:00
496d156faa Added unary interceptor and removed extra logs (#1255)
* added unary interceptor and removed logs from frontend service

* removed extra logs from backend service

* updated evaluator logging

* updated query logging, plus a linter fix

* fix

Co-authored-by: Scott Redig <sredig@google.com>
2020-09-21 15:02:29 -07:00
3a3d618c43 Replaced GS bucket links with substitution variables (#1262) 2020-09-21 12:22:03 -07:00
e1cbd855f5 Added time to assignment metrics to backend (#1241)
* Added time to assignment metrics to backend

- The time to match for tickets is now recorded as a metric

* Fixed formatting errors

* Fixed minor review changes

- Renamed function to calculate time to assignment
- Moved from callback to returning tickets from UpdateAssignments

* Return only successfully assigned tickets

* Fixed linting errors
2020-09-15 11:18:17 -07:00
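A hedged sketch of recording such a time-to-assignment metric with OpenCensus, the stats library Open Match's telemetry is built on; the measure name, view, and bucket bounds are made up for illustration:

```go
package telemetry

import (
	"context"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Illustrative measure; the real metric name and unit may differ.
var mTimeToAssignment = stats.Int64(
	"example/time_to_assignment",
	"Time between ticket creation and assignment",
	stats.UnitMilliseconds)

func init() {
	// A distribution view so dashboards can derive percentiles.
	_ = view.Register(&view.View{
		Name:        "example/time_to_assignment",
		Measure:     mTimeToAssignment,
		Description: "Distribution of time to assignment",
		Aggregation: view.Distribution(10, 100, 1000, 10000),
	})
}

// recordTimeToAssignment is called once per successfully assigned
// ticket, with the ticket's create time taken from the Ticket proto.
func recordTimeToAssignment(ctx context.Context, createTime time.Time) {
	stats.Record(ctx, mTimeToAssignment.M(time.Since(createTime).Milliseconds()))
}
```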
10b36705f0 Tests update: use require assertion (#1257)
* use require in filter package, plus a follow-up fix

* use require in rpc package

* use require in tools/certgen package

* use require in mmf package

* use require in telemetry and logging, plus a follow-up fix

Co-authored-by: Scott Redig <sredig@google.com>
2020-09-09 14:24:18 -07:00
a6fc4724bc Fix spelling in Proto files (#1256)
Regenerated dependent Swagger and Golang files.
2020-09-09 12:20:29 -07:00
511337088a Reduce logging in statestore - redis (#1248)
* reduce logging in statestore - redis #1228, plus a follow-up fix

* added grpc interceptors to log errors, plus a lint fix

Co-authored-by: Scott Redig <sredig@google.com>
2020-09-02 12:50:39 -07:00
5f67bb36a6 Use require in app tests and improve error messages (#1253) 2020-08-31 13:17:29 -07:00
94d2105809 Use require in tests to avoid nil pointer exceptions (#1249)
* use require in tests to avoid nil pointer exceptions

* statestore tests: replaced assert with require
2020-08-28 12:19:53 -07:00
d85f1f4bc7 Added a PR template (#1250) 2020-08-25 14:16:36 -07:00
79e9afeca7 Use Helm release to name resources (#1246)
* Fix indent of TLS certificate annotations

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Small whitespace fixes

Picked up the VSCode Yaml auto-formatter.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Don't pass 'query' config to open-match-customize

It's not used.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Don't pass frontend/backend to open-match-scale

They're not used.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Allow redis to derive resource names from the release

This ensures that multiple OpenMatch installs in a single namespace do
not attempt to install Redis stacks with the same resource names.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Include release names in PodSecurityPolicies

This avoids conflicts between multiple Open Match installations in the
same namespace.

Uses the `openmatch.fullname` named template, following the Helm default chart convention.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make the Service Account name release-dependent

This makes the existing global.kubernetes.serviceAccount value an
override if specified, but if left unspecified, an appropriate name will
be chosen.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make the RBAC resource names release-dependent

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make the TLS Secret names release-dependent

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make the CI-test resource names release-dependent

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make all Pod/Service names release-dependent

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make Grafana dashboard names release-dependent

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make open-match-scale slightly more standalone

This makes the hostname templates more standard in their case, because
there is no need to coordinate the hostname with the superchart.

This chart still uses a lot of templates from the open-match chart
though, so it's not yet standalone-installable.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Make ConfigMap default names release-dependent

A specific ConfigMap can be applied in the same way it was previously,
by overriding configs.default.configName and
configs.override.configName, in which case it is up to the person doing
the deployment to manage name conflicts.

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Use correct Jaeger service names for subcharts

This fixes an existing issue where the Jaeger connection URLs in
the configuration would be incorrect if your Helm chart was not
installed as a release named "open-match".

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>

* Populate Grafana Datasource using a ConfigMap

This allows us to access the Prometheus subchart's named template to get
the correct Service name for the datasource.

This fixes an existing issue where the Prometheus data source URL in
Grafana would be incorrect if your Helm chart was not installed as
a release named "open-match".

Signed-off-by: Paul "Hampy" Hampson <p_hampson@wargaming.net>
2020-08-17 12:04:26 -07:00
3334f7f74a Make: fix create-gke-cluster, create clusterRole (#1234)
If there are multiple accounts in `gcloud auth list`, the command would fail;
add a grep for the active account to fix this.
2020-07-10 10:57:16 -07:00
85ce954eb9 Update backend_service.go (#1233)
Fixed typo
2020-07-09 11:45:33 -07:00
679cfb5839 Rename Ignore list to Pending Release (#1230)
Fix naming across all code. Swagger changes left.

Co-authored-by: Scott Redig <sredig@google.com>
2020-07-08 13:56:30 -07:00
c53a5b7c88 Update Swagger JSONs as well as go proto files (#1231)
Output of run make presubmit on master.

Co-authored-by: Scott Redig <sredig@google.com>
2020-07-08 12:52:51 -07:00
cfb316169a Use supported GKE cluster version (#1232)
Update Makefile.
2020-07-08 12:25:53 -07:00
a9365b5333 fix release.sh not knowing the right images (#1219) 2020-06-01 11:05:27 -07:00
211 changed files with 24896 additions and 5458 deletions

.github/CODEOWNERS (new file, +1)

@ -0,0 +1 @@
* @laremere @aLekSer @HazWard @calebatwd @jonfoust @sawagh


@ -114,7 +114,6 @@ git push origin release-0.5
- [ ] There might be additional references to the old version but be careful not to change it for places that have it for historical purposes.
- [ ] Run `make release`
- [ ] Run `make api/api.md` in open-match repo to update the auto-generated API references in open-match-docs repo.
- [ ] Use the files under the `build/release/` directory for the Open Match installation guide. Make sure the artifacts work as expected - these are the artifacts that will be published to the GCS bucket and used in our release assets.
- [ ] Create a PR with the changes, include the release candidate name, and point it to the release branch.
- [ ] Go to [open-match-build](https://pantheon.corp.google.com/cloud-build/triggers?project=open-match-build) and update all *post submit* triggers' `_GCB_LATEST_VERSION` value to the `X.Y` of the release. This value should only increase as it's used to determine the latest stable version.
- [ ] Merge your changes once the PR is approved.
@ -152,6 +151,7 @@ only required once.**
- [ ] Go to the History section and find the "Post Submit" build of the merged commit that's running. Wait for it to go Green. If it's red, fix the error and repeat this section. Take note of the docker image version tag for the next step. Example: 0.5.0-a4706cb.
- [ ] Run `./docs/governance/templates/release.sh {source version tag} {version}` to copy the images to open-match-public-images.
- [ ] If this is a new minor version in the newest major version then run `./docs/governance/templates/release.sh {source version tag} latest`.
- [ ] Use the files under the `build/release/` directory for the Open Match installation guide. Make sure the artifacts work as expected - these are the artifacts that will be published to the GCS bucket and used in our release assets.
- [ ] Copy the files from `build/release/` generated from `make release` to the release draft you created. You can drag and drop the files using the Github UI.
- [ ] Update [Slack invitation link](https://slack.com/help/articles/201330256-invite-new-members-to-your-workspace#share-an-invite-link) in [open-match.dev](https://open-match.dev/site/docs/contribute/#get-involved).
- [ ] Test Open Match installation under GKE and Minikube environment using YAML files and Helm. Follow the [First Match](https://development.open-match.dev/site/docs/getting-started/first_match/) guide, run `make proxy-demo`, and open `localhost:51507` to make sure everything works.
@ -165,6 +165,7 @@ only required once.**
- [ ] Save the release as a draft.
- [ ] Circulate the draft release to active contributors. Where reasonable, get everyone's ok on the release notes before continuing.
- [ ] Publish the [Release](om-release) in Github. This will notify repository watchers.
- [ ] Publish the [Release](om-release) on Open Match [Blog](https://open-match.dev/site/blog/).
## Announce

.github/pull_request_template.md (new file, +16)

@ -0,0 +1,16 @@
<!-- Thanks for sending a pull request! Here are some tips for you:
If this is your first time, please read our contributor guidelines: https://github.com/googleforgames/open-match/blob/master/CONTRIBUTING.md and developer guide https://github.com/googleforgames/open-match/blob/master/docs/development.md
-->
**What this PR does / Why we need it**:
**Which issue(s) this PR fixes**:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Closes #<issue number>`, or `Closes (paste link of issue)`.
-->
Closes #
**Special notes for your reviewer**:

Makefile (159 changed lines)

@ -15,44 +15,45 @@
## Open Match Make Help
## ====================
##
## Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
## # Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
## make activate-gcp-apis
## make create-gke-cluster push-helm
##
## Create a Minikube Cluster (requires VirtualBox)
## # Create a Minikube Cluster (requires VirtualBox)
## make create-mini-cluster push-helm
##
## Create a KinD Cluster (Follow instructions to run command before pushing helm.)
## # Create a KinD Cluster (Follow instructions to run command before pushing helm.)
## make create-kind-cluster get-kind-kubeconfig
## Finish KinD setup by installing helm:
##
## # Finish KinD setup by installing helm:
## make push-helm
##
## Deploy Open Match
## # Deploy Open Match
## make push-images -j$(nproc)
## make install-chart
##
## Build and Test
## # Build and Test
## make all -j$(nproc)
## make test
##
## Access telemetry
## # Access telemetry
## make proxy-prometheus
## make proxy-grafana
## make proxy-ui
##
## Teardown
## # Teardown
## make delete-mini-cluster
## make delete-gke-cluster
## make delete-kind-cluster && export KUBECONFIG=""
##
## Prepare a Pull Request
## # Prepare a Pull Request
## make presubmit
##
# If you want information on how to edit this file checkout,
# http://makefiletutorial.com/
BASE_VERSION = 1.0.0
BASE_VERSION = 0.0.0-dev
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
@ -123,7 +124,7 @@ GCLOUD = gcloud --quiet
OPEN_MATCH_HELM_NAME = open-match
OPEN_MATCH_KUBERNETES_NAMESPACE = open-match
OPEN_MATCH_SECRETS_DIR = $(REPOSITORY_ROOT)/install/helm/open-match/secrets
GCLOUD_ACCOUNT_EMAIL = $(shell gcloud auth list --format yaml | grep account: | cut -c 10-)
GCLOUD_ACCOUNT_EMAIL = $(shell gcloud auth list --format yaml | grep ACTIVE -a2 | grep account: | cut -c 10-)
_GCB_POST_SUBMIT ?= 0
# Latest version triggers builds of :latest images.
_GCB_LATEST_VERSION ?= undefined
@ -187,7 +188,7 @@ else
endif
endif
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go internal/ipb/messages.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
SWAGGER_JSON_DOCS = api/frontend.swagger.json api/backend.swagger.json api/query.swagger.json api/matchfunction.swagger.json api/evaluator.swagger.json
@ -197,7 +198,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
CMDS = $(notdir $(wildcard cmd/*))
# Names of the individual images, omitting the openmatch prefix.
IMAGES = $(CMDS) mmf-go-soloduel base-build
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-backfill base-build
help:
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
@ -209,14 +210,18 @@ local-cloud-build: gcloud
################################################################################
## #############################################################################
## Image commands:
## These commands are auto-generated based on a complete list of images. All
## folders in cmd/ are turned into an image using Dockerfile.cmd. Additional
## images are specified by the IMAGES variable. Image commands omit the
## "openmatch-" prefix on the image name and tags.
## These commands are auto-generated based on a complete list of images.
## All folders in cmd/ are turned into an image using Dockerfile.cmd.
## Additional images are specified by the IMAGES variable.
## Image commands omit the "openmatch-" prefix on the image name and tags.
##
list-images:
@echo $(IMAGES)
#######################################
## build-images / build-<image name>-image: builds images locally
## # Builds images locally
## build-images / build-<image name>-image
##
build-images: $(foreach IMAGE,$(IMAGES),build-$(IMAGE)-image)
@ -237,9 +242,12 @@ $(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-buil
build-mmf-go-soloduel-image: docker build-base-build-image
docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
build-mmf-go-backfill-image: docker build-base-build-image
docker build -f examples/functions/golang/backfill/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-backfill:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-backfill:$(ALTERNATE_TAG) .
#######################################
## push-images / push-<image name>-image: builds and pushes images to your
## container registry.
## # Builds and pushes images to your container registry.
## push-images / push-<image name>-image
##
push-images: $(foreach IMAGE,$(IMAGES),push-$(IMAGE)-image)
@ -258,8 +266,9 @@ endif
endif
#######################################
## retag-images / retag-<image name>-image: publishes images on the public
## container registry. Used for publishing releases.
## # Publishes images on the public container registry.
## # Used for publishing releases.
## retag-images / retag-<image name>-image
##
retag-images: $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image)
@ -272,7 +281,8 @@ $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image): retag-%-image: docker
docker push $(TARGET_REGISTRY)/openmatch-$*:$(TAG)
#######################################
## clean-images / clean-<image name>-image: removes images from local docker
## # Removes images from local docker
## clean-images / clean-<image name>-image
##
clean-images: docker $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image)
-docker rmi -f open-match-base-build
@ -282,7 +292,7 @@ $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image): clean-%-image:
#####################################################################################################################
update-chart-deps: build/toolchain/bin/helm$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com; $(HELM) dependency update)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://charts.helm.sh/incubator; $(HELM) repo add bitnami https://charts.bitnami.com/bitnami;$(HELM) dependency update)
lint-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/ct$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm; $(HELM) lint $(OPEN_MATCH_HELM_NAME))
@ -295,8 +305,8 @@ build/chart/open-match-$(BASE_VERSION).tgz: build/toolchain/bin/helm$(EXE_EXTENS
build/chart/index.yaml: build/toolchain/bin/helm$(EXE_EXTENSION) gcloud build/chart/open-match-$(BASE_VERSION).tgz
mkdir -p $(BUILD_DIR)/chart-index/
-gsutil cp gs://open-match-chart/chart/index.yaml $(BUILD_DIR)/chart-index/
-gsutil -m cp gs://open-match-chart/chart/open-match-* $(BUILD_DIR)/chart-index/
-gsutil cp $(_CHARTS_BUCKET)/chart/index.yaml $(BUILD_DIR)/chart-index/
-gsutil -m cp $(_CHARTS_BUCKET)/chart/open-match-* $(BUILD_DIR)/chart-index/
$(HELM) repo index $(BUILD_DIR)/chart-index/
$(HELM) repo index --merge $(BUILD_DIR)/chart-index/index.yaml $(BUILD_DIR)/chart/
@ -310,7 +320,7 @@ install-chart-prerequisite: build/toolchain/bin/kubectl$(EXE_EXTENSION) update-c
$(KUBECTL) apply -f install/gke-metadata-server-workaround.yaml
# Used for Open Match development. Install om-configmap-override.yaml by default.
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD)
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD) --set redis.usePassword=false --set redis.sentinel.usePassword=false
# Used for generate static yamls. Install om-configmap-override.yaml as needed.
HELM_TEMPLATE_FLAGS = --no-hooks --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --set usingHelmTemplate=true
HELM_IMAGE_FLAGS = --set global.image.registry=$(REGISTRY) --set global.image.tag=$(TAG)
@ -354,19 +364,22 @@ install-scale-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EX
--set open-match-core.redis.enabled=false \
--set global.telemetry.prometheus.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set open-match-scale.enabled=true | $(KUBECTL) apply -f -
--set global.kubernetes.serviceAccount=$(OPEN_MATCH_HELM_NAME)-unprivileged-service \
--set open-match-scale.enabled=true \
--set open-match-scale.configs.default.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-default\" \}\}" \
--set open-match-scale.configs.override.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-override\" \}\}" | $(KUBECTL) apply -f -
# install-ci-chart will install open-match-core with pool based mmf for end-to-end in-cluster test.
install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTENSION) install/helm/open-match/secrets/
$(HELM) upgrade $(OPEN_MATCH_HELM_NAME) $(HELM_UPGRADE_FLAGS) --atomic install/helm/open-match $(HELM_IMAGE_FLAGS) \
--set query.replicas=1,frontend.replicas=1,backend.replicas=1 \
--set evaluator.hostName=test \
--set evaluator.hostName=open-match-test \
--set evaluator.grpcPort=50509 \
--set evaluator.httpPort=51509 \
--set open-match-core.registrationInterval=200ms \
--set open-match-core.proposalCollectionInterval=200ms \
--set open-match-core.assignedDeleteTimeout=200ms \
--set open-match-core.pendingReleaseTimeout=200ms \
--set open-match-core.pendingReleaseTimeout=1s \
--set open-match-core.queryPageSize=10 \
--set global.gcpProjectId=intentionally-invalid-value \
--set redis.master.resources.requests.cpu=0.6,redis.master.resources.requests.memory=300Mi \
@ -386,9 +399,12 @@ install/yaml/: TAG = $(BASE_VERSION)
endif
install/yaml/: update-chart-deps install/yaml/install.yaml install/yaml/01-open-match-core.yaml install/yaml/02-open-match-demo.yaml install/yaml/03-prometheus-chart.yaml install/yaml/04-grafana-chart.yaml install/yaml/05-jaeger-chart.yaml install/yaml/06-open-match-override-configmap.yaml install/yaml/07-open-match-default-evaluator.yaml
# We have to hard-code the Jaeger endpoints as we are excluding Jaeger, so Helm cannot determine the endpoints from the Jaeger subchart
install/yaml/01-open-match-core.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
--set-string global.telemetry.jaeger.agentEndpoint="$(OPEN_MATCH_HELM_NAME)-jaeger-agent:6831" \
--set-string global.telemetry.jaeger.collectorEndpoint="http://$(OPEN_MATCH_HELM_NAME)-jaeger-collector:14268/api/traces" \
install/helm/open-match > install/yaml/01-open-match-core.yaml
install/yaml/02-open-match-demo.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@ -406,6 +422,7 @@ install/yaml/03-prometheus-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set global.telemetry.prometheus.enabled=true \
install/helm/open-match > install/yaml/03-prometheus-chart.yaml
# We have to hard-code the Prometheus Server URL as we are excluding Prometheus, so Helm cannot determine the URL from the Prometheus subchart
install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
@ -413,6 +430,7 @@ install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set open-match-core.redis.enabled=false \
--set open-match-telemetry.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set-string global.telemetry.grafana.prometheusServer="http://$(OPEN_MATCH_HELM_NAME)-prometheus-server.$(OPEN_MATCH_KUBERNETES_NAMESPACE).svc.cluster.local:80/" \
install/helm/open-match > install/yaml/04-grafana-chart.yaml
install/yaml/05-jaeger-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@ -459,11 +477,29 @@ set-redis-password:
read REDIS_PASSWORD; \
stty echo; \
printf "\n"; \
$(KUBECTL) create secret generic om-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
$(KUBECTL) create secret generic open-match-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
## ####################################
## # Tool installation helpers
##
## # Install toolchain. Short for installing K8s, protoc and OpenMatch tools.
## make install-toolchain
##
install-toolchain: install-kubernetes-tools install-protoc-tools install-openmatch-tools
## # Install Kubernetes tools
## make install-kubernetes-tools
##
install-kubernetes-tools: build/toolchain/bin/kubectl$(EXE_EXTENSION) build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/minikube$(EXE_EXTENSION) build/toolchain/bin/terraform$(EXE_EXTENSION)
## # Install protoc tools
## make install-protoc-tools
##
install-protoc-tools: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION)
## # Install OpenMatch tools
## make install-openmatch-tools
##
install-openmatch-tools: build/toolchain/bin/certgen$(EXE_EXTENSION) build/toolchain/bin/reaper$(EXE_EXTENSION)
build/toolchain/bin/helm$(EXE_EXTENSION):
@ -597,7 +633,10 @@ get-kind-kubeconfig: build/toolchain/bin/kind$(EXE_EXTENSION)
delete-kind-cluster: build/toolchain/bin/kind$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
-$(KIND) delete cluster
create-gke-cluster: GKE_VERSION = 1.14.10-gke.32 # gcloud beta container get-server-config --zone us-west1-a
create-cluster-role-binding:
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
create-gke-cluster: GKE_VERSION = 1.15.12-gke.20 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-4 --enable-autoscaling --min-nodes 1 --num-nodes 2 --max-nodes 10 --disk-size 50
create-gke-cluster: GKE_FUTURE_COMPAT_FLAGS = --no-enable-basic-auth --no-issue-client-certificate --enable-ip-alias --metadata disable-legacy-endpoints=true --enable-autoupgrade
create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
@ -606,7 +645,8 @@ create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
--cluster-version $(GKE_VERSION) \
--image-type cos_containerd \
--tags open-match
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
$(MAKE) create-cluster-role-binding
delete-gke-cluster: gcloud
-$(GCLOUD) $(GCP_PROJECT_FLAG) container clusters delete $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GCLOUD_EXTRA_FLAGS)
@ -620,12 +660,19 @@ delete-mini-cluster: build/toolchain/bin/minikube$(EXE_EXTENSION)
gcp-apply-binauthz-policy: build/policies/binauthz.yaml
$(GCLOUD) beta $(GCP_PROJECT_FLAG) container binauthz policy import build/policies/binauthz.yaml
## ####################################
## # Protobuf
##
## # Build all protobuf definitions.
## make all-protos
##
all-protos: $(ALL_PROTOS)
# The proto generator really wants to be run from the $GOPATH root, and doesn't
# support methods for directing it to the correct location that's not the proto
# file's location. So instead put it in a temporary directory, then move it
# out.
# file's location.
# So, instead, put it in a temporary directory, then move it out.
pkg/pb/%.pb.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
mkdir -p $(REPOSITORY_ROOT)/build/prototmp $(REPOSITORY_ROOT)/pkg/pb
$(PROTOC) $< \
@ -652,20 +699,19 @@ api/%.swagger.json: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXT
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--swagger_out=logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)
## # Build API reference in markdown. Needs open-match-docs repo at the same level as this one.
## make api/api.md
##
api/api.md: third_party/ build/toolchain/bin/protoc-gen-doc$(EXE_EXTENSION)
$(PROTOC) api/*.proto \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--doc_out=. \
--doc_opt=markdown,api.md
--doc_opt=markdown,api_temp.md
# Crazy hack that inserts a hugo link reference into this API doc -)
$(SED_REPLACE) '1 i\---\
title: "Open Match API References" \
linkTitle: "Open Match API References" \
weight: 2 \
description: \
This document provides API references for Open Match services. \
--- \
' ./api.md && mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
cat ./docs/hugo_apiheader.txt ./api_temp.md >> api.md
mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
rm ./api_temp.md
# The include structure of the protos needs to be called out so the dependency chain is run through properly.
pkg/pb/backend.pb.go: pkg/pb/messages.pb.go
@ -674,7 +720,15 @@ pkg/pb/matchfunction.pb.go: pkg/pb/messages.pb.go
pkg/pb/query.pb.go: pkg/pb/messages.pb.go
pkg/pb/evaluator.pb.go: pkg/pb/messages.pb.go
internal/ipb/synchronizer.pb.go: pkg/pb/messages.pb.go
internal/ipb/messages.pb.go: pkg/pb/messages.pb.go
## ####################################
## # Go tasks
##
## # Build assets and binaries
## make build
##
build: assets
$(GO) build ./...
$(GO) build -tags e2ecluster ./...
@ -696,9 +750,15 @@ define fast_test_folder
$(foreach dir, $(wildcard $(1)/*/.), $(call fast_test_folder, $(dir)))
endef
## # Run go tests
## make test
##
test: $(ALL_PROTOS) tls-certs third_party/
$(call test_folder,.)
## # Run go tests more quickly, but with worse flake and race detection
## make fasttest
##
fasttest: $(ALL_PROTOS) tls-certs third_party/
$(call fast_test_folder,.)
@ -715,6 +775,9 @@ vet:
golangci: build/toolchain/bin/golangci-lint$(EXE_EXTENSION)
GO111MODULE=on $(GOLANGCI) run --config=$(REPOSITORY_ROOT)/.golangci.yaml
## # Run linter on Go code, charts and terraform
## make lint
##
lint: fmt vet golangci lint-chart terraform-lint
assets: $(ALL_PROTOS) tls-certs third_party/ build/chart/
@ -785,13 +848,13 @@ md-test: docker
ci-deploy-artifacts: install/yaml/ $(SWAGGER_JSON_DOCS) build/chart/ gcloud
ifeq ($(_GCB_POST_SUBMIT),1)
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* gs://open-match-chart/install/v$(BASE_VERSION)/yaml/
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json gs://open-match-chart/api/v$(BASE_VERSION)/
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* $(_CHARTS_BUCKET)/install/v$(BASE_VERSION)/yaml/
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json $(_CHARTS_BUCKET)/api/v$(BASE_VERSION)/
# Deploy Helm Chart
# Since each build will refresh just its version we can allow this for every post submit.
# Copy the files into multiple locations to keep a backup.
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/by-hash/$(VERSION)/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/by-hash/$(VERSION)/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/
else
@echo "Not deploying build artifacts to open-match.dev because this is not a post commit change."
endif


@ -93,7 +93,7 @@ message ReleaseAllTicketsRequest{}
message ReleaseAllTicketsResponse {}
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
message AssignmentGroup{
message AssignmentGroup {
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
repeated string ticket_ids = 1;
@ -146,7 +146,6 @@ service BackendService {
// ReleaseTickets moves tickets from the pending state, to the active state.
// This enables them to be returned by query, and find different matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseTickets(ReleaseTicketsRequest) returns (ReleaseTicketsResponse) {
@ -159,7 +158,6 @@ service BackendService {
// ReleaseAllTickets moves all tickets from the pending state, to the active
// state. This enables them to be returned by query, and find different
// matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseAllTickets(ReleaseAllTicketsRequest) returns (ReleaseAllTicketsResponse) {


@ -27,12 +27,21 @@
"/v1/backendservice/matches:fetch": {
"post": {
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and\nreturns a set of matches generated by the Match Making Function, and\naccepted by the evaluator.\nTickets in matches returned by FetchMatches are moved from active to\npending, and will not be returned by query.",
"operationId": "FetchMatches",
"operationId": "BackendService_FetchMatches",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchFetchMatchesResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchFetchMatchesResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchFetchMatchesResponse"
}
},
"404": {
@ -41,6 +50,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -61,7 +76,7 @@
"/v1/backendservice/tickets:assign": {
"post": {
"summary": "AssignTickets overwrites the Assignment field of the input TicketIds.",
"operationId": "AssignTickets",
"operationId": "BackendService_AssignTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -75,6 +90,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -96,7 +117,7 @@
"post": {
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseTickets",
"operationId": "BackendService_ReleaseTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -110,6 +131,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -131,7 +158,7 @@
"post": {
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseAllTickets",
"operationId": "BackendService_ReleaseAllTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -145,6 +172,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -172,6 +205,17 @@
],
"default": "UNKNOWN"
},
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignTicketsRequest": {
"type": "object",
"properties": {
@ -242,6 +286,37 @@
},
"description": "AssignmentGroup contains an Assignment and the Tickets to which it should be applied."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo not read or write to this field, it is for internal tracking and changing the value will cause bugs."
}
},
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.\nRepresents a backfill entity which is used to fill partially full matches."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -258,6 +333,10 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Which bounds would be excluded when comparing with a ticket's search_fields.double_args value.\n\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -336,6 +415,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\n\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\n\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -519,6 +606,27 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeError": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
},
"runtimeStreamError": {
"type": "object",
"properties": {
@ -545,20 +653,6 @@
}
}
},
"x-stream-definitions": {
"openmatchFetchMatchesResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchFetchMatchesResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchFetchMatchesResponse"
}
},
"externalDocs": {
"description": "Open Match Documentation",
"url": "https://open-match.dev/site/docs/"


@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
};
message EvaluateRequest {


@ -27,12 +27,21 @@
"/v1/evaluator/matches:evaluate": {
"post": {
"summary": "Evaluate evaluates a list of proposed matches based on quality, collision status, and etc, then shortlist the matches and returns the final results.",
"operationId": "Evaluate",
"operationId": "Evaluator_Evaluate",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchEvaluateResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchEvaluateResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchEvaluateResponse"
}
},
"404": {
@ -41,6 +50,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -78,6 +93,37 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo not read or write to this field, it is for internal tracking and changing the value will cause bugs."
}
},
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.\nRepresents a backfill entity which is used to fill partially full matches."
},
"openmatchEvaluateRequest": {
"type": "object",
"properties": {
@ -124,6 +170,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\n\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\n\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -201,6 +255,27 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeError": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
},
"runtimeStreamError": {
"type": "object",
"properties": {
@ -227,20 +302,6 @@
}
}
},
"x-stream-definitions": {
"openmatchEvaluateResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchEvaluateResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchEvaluateResponse"
}
},
"externalDocs": {
"description": "Open Match Documentation",
"url": "https://open-match.dev/site/docs/"

View File

@ -53,7 +53,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
};
message CreateTicketRequest {
@ -81,6 +81,47 @@ message WatchAssignmentsResponse {
Assignment assignment = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message AcknowledgeBackfillRequest {
// An existing ID of Backfill to acknowledge.
string backfill_id = 1;
// An updated Assignment of the requested Backfill.
Assignment assignment = 2;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message CreateBackfillRequest {
// An empty Backfill object.
Backfill backfill = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message DeleteBackfillRequest {
// An existing ID of Backfill to delete.
string backfill_id = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message GetBackfillRequest {
// An existing ID of Backfill to retrieve.
string backfill_id = 1;
}
// UpdateBackfillRequest - updates searchFields and extensions, and sets the assignment.
//
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message UpdateBackfillRequest {
// A Backfill object with ID set and fields to update.
Backfill backfill = 1;
}
// The FrontendService implements APIs to manage and query the status of Tickets.
service FrontendService {
// CreateTicket assigns a unique TicketId to the input Ticket and records it in state storage.
@ -117,4 +158,55 @@ service FrontendService {
get: "/v1/frontendservice/tickets/{ticket_id}/assignments"
};
}
// AcknowledgeBackfill is used to notify Open Match about GameServer connection info.
// This triggers an assignment process.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc AcknowledgeBackfill(AcknowledgeBackfillRequest) returns (Backfill) {
option (google.api.http) = {
post: "/v1/frontendservice/backfills/{backfill_id}/acknowledge"
body: "*"
};
}
// CreateBackfill creates a new Backfill object.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc CreateBackfill(CreateBackfillRequest) returns (Backfill) {
option (google.api.http) = {
post: "/v1/frontendservice/backfills"
body: "*"
};
}
// DeleteBackfill receives a backfill ID and deletes its resource.
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc DeleteBackfill(DeleteBackfillRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1/frontendservice/backfills/{backfill_id}"
};
}
// GetBackfill returns a backfill object by its ID.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc GetBackfill(GetBackfillRequest) returns (Backfill) {
option (google.api.http) = {
get: "/v1/frontendservice/backfills/{backfill_id}"
};
}
// UpdateBackfill updates search_fields and extensions for the backfill with the provided id.
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc UpdateBackfill(UpdateBackfillRequest) returns (Backfill) {
option (google.api.http) = {
patch: "/v1/frontendservice/backfills"
body: "*"
};
}
}
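
To make the new backfill surface concrete, here is a minimal sketch of the backfill lifecycle from a game server's point of view. It assumes the generated Go client in open-match.dev/open-match/pkg/pb and the default in-cluster Frontend address used elsewhere in this changeset; the search field and connection string are illustrative only.
```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Assumption: default in-cluster Frontend endpoint, insecure transport as in the demos.
	conn, err := grpc.Dial("open-match-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fe := pb.NewFrontendServiceClient(conn)

	// Create a backfill; Open Match issues the ID.
	b, err := fe.CreateBackfill(context.Background(), &pb.CreateBackfillRequest{
		Backfill: &pb.Backfill{
			SearchFields: &pb.SearchFields{StringArgs: map[string]string{"mode": "demo"}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Once the game server can accept players, acknowledge the backfill with
	// connection info; this triggers the assignment process for waiting tickets.
	if _, err := fe.AcknowledgeBackfill(context.Background(), &pb.AcknowledgeBackfillRequest{
		BackfillId: b.Id,
		Assignment: &pb.Assignment{Connection: "10.0.0.1:7777"},
	}); err != nil {
		log.Fatal(err)
	}
}
```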

View File

@ -24,10 +24,213 @@
"application/json"
],
"paths": {
"/v1/frontendservice/backfills": {
"post": {
"summary": "CreateBackfill creates a new Backfill object.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_CreateBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchCreateBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
},
"patch": {
"summary": "UpdateBackfill updates search_fields and extensions for the backfill with the provided id.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_UpdateBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchUpdateBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/backfills/{backfill_id}": {
"get": {
"summary": "GetBackfill returns a backfill object by its ID.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_GetBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
{
"name": "backfill_id",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"FrontendService"
]
},
"delete": {
"summary": "DeleteBackfill receives a backfill ID and deletes its resource.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_DeleteBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"properties": {}
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
{
"name": "backfill_id",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/backfills/{backfill_id}/acknowledge": {
"post": {
"summary": "AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info\nThis triggers an assignment process.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_AcknowledgeBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
{
"name": "backfill_id",
"in": "path",
"required": true,
"type": "string"
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchAcknowledgeBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/tickets": {
"post": {
"summary": "CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.\nA ticket is considered as ready for matchmaking once it is created.\n - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.\n - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.",
"operationId": "CreateTicket",
"operationId": "FrontendService_CreateTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -41,6 +244,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -61,7 +270,7 @@
"/v1/frontendservice/tickets/{ticket_id}": {
"get": {
"summary": "GetTicket get the Ticket associated with the specified TicketId.",
"operationId": "GetTicket",
"operationId": "FrontendService_GetTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -75,6 +284,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -92,7 +307,7 @@
},
"delete": {
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client should delete the Ticket when finished matchmaking with it.",
"operationId": "DeleteTicket",
"operationId": "FrontendService_DeleteTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -106,6 +321,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -125,12 +346,21 @@
"/v1/frontendservice/tickets/{ticket_id}/assignments": {
"get": {
"summary": "WatchAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "WatchAssignments",
"operationId": "FrontendService_WatchAssignments",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchWatchAssignmentsResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchWatchAssignmentsResponse"
}
},
"404": {
@ -139,6 +369,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -157,6 +393,18 @@
}
},
"definitions": {
"openmatchAcknowledgeBackfillRequest": {
"type": "object",
"properties": {
"backfill_id": {
"type": "string"
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment"
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -174,6 +422,46 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo not read or write to this field, it is for internal tracking and changing the value will cause bugs."
}
},
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.\nRepresents a backfill entity which is used to fill partially full matches."
},
"openmatchCreateBackfillRequest": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchCreateTicketRequest": {
"type": "object",
"properties": {
@ -241,6 +529,15 @@
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"openmatchUpdateBackfillRequest": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"description": "UpdateBackfillRequest - update searchFields, extensions and set assignment.\n\nBETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
@ -265,6 +562,27 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeError": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
},
"runtimeStreamError": {
"type": "object",
"properties": {
@ -291,20 +609,6 @@
}
}
},
"x-stream-definitions": {
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchWatchAssignmentsResponse"
}
},
"externalDocs": {
"description": "Open Match Documentation",
"url": "https://open-match.dev/site/docs/"

View File

@ -69,8 +69,9 @@ message RunResponse {
// The MatchFunction service implements APIs to run user-defined matchmaking logic.
service MatchFunction {
// DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.
// Run pulls Tickets that satisify Profile constraints from QueryService, runs matchmaking logics against them, then
// constructs and streams back match candidates to the Backend service.
// Run pulls Tickets that satisfy Profile constraints from QueryService,
// runs matchmaking logic against them, then constructs and streams back
// match candidates to the Backend service.
rpc Run(RunRequest) returns (stream RunResponse) {
option (google.api.http) = {
post: "/v1/matchfunction:run"

View File

@ -26,13 +26,22 @@
"paths": {
"/v1/matchfunction:run": {
"post": {
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisify Profile constraints from QueryService, runs matchmaking logics against them, then\nconstructs and streams back match candidates to the Backend service.",
"operationId": "Run",
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then\nconstructs and streams back match candidates to the Backend service.",
"operationId": "MatchFunction_Run",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchRunResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchRunResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchRunResponse"
}
},
"404": {
@ -41,6 +50,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -60,6 +75,17 @@
}
},
"definitions": {
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -77,6 +103,37 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo not read or write to this field, it is for internal tracking and changing the value will cause bugs."
}
},
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.\nRepresents a backfill entity which is used to fill partially full matches."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -93,6 +150,10 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Which bounds would be excluded when comparing with a ticket's search_fields.double_args value.\n\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -125,6 +186,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\n\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\n\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -305,6 +374,27 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeError": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
},
"runtimeStreamError": {
"type": "object",
"properties": {
@ -331,20 +421,6 @@
}
}
},
"x-stream-definitions": {
"openmatchRunResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchRunResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchRunResponse"
}
},
"externalDocs": {
"description": "Open Match Documentation",
"url": "https://open-match.dev/site/docs/"

View File

@ -103,6 +103,25 @@ message DoubleRangeFilter {
// Minimum value.
double min = 3;
enum Exclude {
// No bounds should be excluded when evaluating the filter, i.e.: MIN <= x <= MAX
NONE = 0;
// Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN < x <= MAX
MIN = 1;
// Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN <= x < MAX
MAX = 2;
// Both bounds should be excluded when evaluating the filter, i.e.: MIN < x < MAX
BOTH = 3;
}
// Defines the bounds to apply when filtering tickets by their search_fields.double_args value.
// BETA FEATURE WARNING: This field and the associated values are
// not finalized and still subject to possible change or removal.
Exclude exclude = 4;
}
// Filters strings exactly equaling a value.
@ -201,6 +220,45 @@ message Match {
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 7;
// Backfill request which carries additional information for the match
// and holds an association to a GameServer.
// BETA FEATURE WARNING: This field is not finalized and still subject
// to possible change or removal.
Backfill backfill = 8;
// AllocateGameServer signals the Director that the Backfill is new and it should
// allocate a GameServer; this Backfill will be assigned to it.
// BETA FEATURE WARNING: This field is not finalized and still subject
// to possible change or removal.
bool allocate_gameserver = 9;
// Deprecated fields.
reserved 5, 6;
}
// Represents a backfill entity which is used to fill partially full matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
message Backfill {
// Id represents an auto-generated Id issued by Open Match.
string id = 1;
// Search fields are the fields which Open Match is aware of, and can be used
// when specifying filters.
SearchFields search_fields = 2;
// Customized information not inspected by Open Match, to be used by
// the Match Function, evaluator, and components making calls to Open Match.
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 3;
// Create time is the time the Backfill was created. It is populated by Open
// Match at the time of Backfill creation.
google.protobuf.Timestamp create_time = 4;
// Generation is incremented on GameServer update operations.
// It prevents the MMF from overriding a newer version from the game server.
// Do NOT read or write this field; it is used for internal tracking, and changing the value will cause bugs.
int64 generation = 5;
}
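
As a sketch of how the new Exclude bounds could be set from Go (assuming the generated pb package; the "mmr" argument and the range are illustrative only), here is a pool filter that excludes the minimum bound:
```go
package main

import (
	"fmt"

	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Matches tickets with 1000 < mmr <= 2000: only the MIN bound is excluded.
	pool := &pb.Pool{
		Name: "competitive",
		DoubleRangeFilters: []*pb.DoubleRangeFilter{{
			DoubleArg: "mmr",
			Min:       1000,
			Max:       2000,
			Exclude:   pb.DoubleRangeFilter_MIN,
		}},
	}
	fmt.Println(pool)
}
```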

View File

@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
};
message QueryTicketsRequest {
@ -75,12 +75,26 @@ message QueryTicketIdsResponse {
repeated string ids = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and
// still subject to possible change or removal.
message QueryBackfillsRequest {
// The Pool representing the set of Filters to be queried.
Pool pool = 1;
}
// BETA FEATURE WARNING: This Response message is not finalized and
// still subject to possible change or removal.
message QueryBackfillsResponse {
// Backfills that meet all the filtering criteria requested by the pool.
repeated Backfill backfills = 1;
}
// The QueryService implements helper APIs for Match Functions to query Tickets from state storage.
service QueryService {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `queryPageSize` and streams back responses.
// - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
// - queryPageSize defaults to 1000 if not set, and has a minimum of 10 and a maximum of 10000.
rpc QueryTickets(QueryTicketsRequest) returns (stream QueryTicketsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/tickets:query"
@ -91,11 +105,21 @@ service QueryService {
// QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
// - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
// QueryTicketIds pages the TicketIDs by `queryPageSize` and streams back responses.
// - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
// - queryPageSize defaults to 1000 if not set, and has a minimum of 10 and a maximum of 10000.
rpc QueryTicketIds(QueryTicketIdsRequest) returns (stream QueryTicketIdsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/ticketids:query"
body: "*"
};
}
// QueryBackfills gets a list of Backfills.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc QueryBackfills(QueryBackfillsRequest) returns (stream QueryBackfillsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/backfills:query"
body: "*"
};
}
}
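
A minimal sketch of consuming the new QueryBackfills stream directly, assuming the generated Go client and the default in-cluster QueryService address used elsewhere in this changeset; match functions would normally use the matchfunction.QueryBackfillPool helper shown in the backfill example below instead.
```go
package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	conn, err := grpc.Dial("open-match-query.open-match.svc.cluster.local:50503", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	qc := pb.NewQueryServiceClient(conn)

	// An empty pool has no filters, so this streams back all backfills.
	stream, err := qc.QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{Pool: &pb.Pool{}})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("received %d backfills", len(resp.Backfills))
	}
}
```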

View File

@ -24,15 +24,25 @@
"application/json"
],
"paths": {
"/v1/queryservice/ticketids:query": {
"/v1/queryservice/backfills:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"operationId": "QueryTicketIds",
"summary": "QueryBackfills gets a list of Backfills.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "QueryService_QueryBackfills",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchQueryTicketIdsResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryBackfillsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryBackfillsResponse"
}
},
"404": {
@ -41,6 +51,61 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchQueryBackfillsRequest"
}
}
],
"tags": [
"QueryService"
]
}
},
"/v1/queryservice/ticketids:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryService_QueryTicketIds",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -60,13 +125,22 @@
},
"/v1/queryservice/tickets:query": {
"post": {
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"operationId": "QueryTickets",
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryService_QueryTickets",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchQueryTicketsResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketsResponse"
}
},
"404": {
@ -75,6 +149,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/runtimeError"
}
}
},
"parameters": [
@ -94,6 +174,17 @@
}
},
"definitions": {
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -111,6 +202,37 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo not read or write to this field, it is for internal tracking and changing the value will cause bugs."
}
},
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.\nRepresents a backfill entity which is used to fill partially full matches."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -127,6 +249,10 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Which bounds would be excluded when comparing with a ticket's search_fields.double_args value.\n\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -170,6 +296,29 @@
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchQueryBackfillsRequest": {
"type": "object",
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
}
},
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
},
"openmatchQueryBackfillsResponse": {
"type": "object",
"properties": {
"backfills": {
"type": "array",
"items": {
"$ref": "#/definitions/openmatchBackfill"
},
"description": "Backfills that meet all the filtering criteria requested by the pool."
}
},
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
},
"openmatchQueryTicketIdsRequest": {
"type": "object",
"properties": {
@ -307,6 +456,27 @@
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeError": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
},
"runtimeStreamError": {
"type": "object",
"properties": {
@ -333,32 +503,6 @@
}
}
},
"x-stream-definitions": {
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
},
"openmatchQueryTicketsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketsResponse"
}
},
"externalDocs": {
"description": "Open Match Documentation",
"url": "https://open-match.dev/site/docs/"

View File

@ -90,7 +90,7 @@ steps:
- id: 'Build: Assets'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'assets', '-j12']
args: ['make', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'assets', '-j12']
volumes:
- name: 'go-vol'
path: '/go'
@ -106,7 +106,7 @@ steps:
- id: 'Test: Services'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'GOLANG_TEST_COUNT=10', 'test']
args: ['make', 'GOPROXY=off', 'GOLANG_TEST_COUNT=10', 'test']
volumes:
- name: 'go-vol'
path: '/go'
@ -132,7 +132,7 @@ steps:
- id: 'Deploy: Deployment Configs'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', 'ci-deploy-artifacts']
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'ci-deploy-artifacts']
waitFor: ['Lint: Format, Vet, Charts', 'Test: Deploy Open Match']
volumes:
- name: 'go-vol'
@ -153,7 +153,7 @@ steps:
artifacts:
objects:
location: gs://open-match-build-artifacts/output/
location: '${_ARTIFACTS_BUCKET}'
paths:
- install/yaml/install.yaml
- install/yaml/01-open-match-core.yaml
@ -164,10 +164,13 @@ artifacts:
- install/yaml/06-open-match-override-configmap.yaml
substitutions:
_OM_VERSION: "1.0.0"
_OM_VERSION: "0.0.0-dev"
_GCB_POST_SUBMIT: "0"
_GCB_LATEST_VERSION: "undefined"
logsBucket: 'gs://open-match-build-logs/'
_ARTIFACTS_BUCKET: "gs://open-match-build-artifacts/output/"
_LOGS_BUCKET: "gs://open-match-build-logs/"
_CHARTS_BUCKET: "gs://open-match-chart"
logsBucket: '${_LOGS_BUCKET}'
options:
sourceProvenanceHash: ['SHA256']
machineType: 'N1_HIGHCPU_32'

View File

@ -1,10 +1,10 @@
{
"urls": [
{"name": "Frontend", "url": "https://open-match.dev/api/v1.0.0/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v1.0.0/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v1.0.0/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v1.0.0/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v1.0.0/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v1.0.0/evaluator.swagger.json"}
{"name": "Frontend", "url": "https://open-match.dev/api/v0.0.0-dev/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v0.0.0-dev/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v0.0.0-dev/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v0.0.0-dev/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v0.0.0-dev/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v0.0.0-dev/evaluator.swagger.json"}
]
}

View File

@ -111,8 +111,8 @@ While iterating on the project, you may need to:
## Accessing logs
To look at Open Match core services' logs, run:
```bash
# Replace om-frontend with the service name that you would like to access
kubectl logs -n open-match svc/om-frontend
# Replace open-match-frontend with the service name that you would like to access
kubectl logs -n open-match svc/open-match-frontend
```
## API References

View File

@ -12,24 +12,13 @@ SOURCE_VERSION=$1
DEST_VERSION=$2
SOURCE_PROJECT_ID=open-match-build
DEST_PROJECT_ID=open-match-public-images
IMAGE_NAMES="openmatch-backend openmatch-frontend openmatch-query openmatch-synchronizer openmatch-minimatch openmatch-demo-first-match openmatch-mmf-go-soloduel openmatch-mmf-go-pool openmatch-evaluator-go-simple openmatch-swaggerui openmatch-reaper"
IMAGE_NAMES=$(make list-images)
for name in $IMAGE_NAMES
do
source_image=gcr.io/$SOURCE_PROJECT_ID/$name:$SOURCE_VERSION
dest_image=gcr.io/$DEST_PROJECT_ID/$name:$DEST_VERSION
source_image=gcr.io/$SOURCE_PROJECT_ID/openmatch-$name:$SOURCE_VERSION
dest_image=gcr.io/$DEST_PROJECT_ID/openmatch-$name:$DEST_VERSION
docker pull $source_image
docker tag $source_image $dest_image
docker push $dest_image
done
echo "=============================================================="
echo "=============================================================="
echo "=============================================================="
echo "=============================================================="
echo "Add these lines to your release notes:"
for name in $IMAGE_NAMES
do
echo "docker pull gcr.io/$DEST_PROJECT_ID/$name:$DEST_VERSION"
done

docs/hugo_apiheader.txt Normal file
View File

@ -0,0 +1,7 @@
---
title: "Open Match API References"
linkTitle: "Open Match API References"
weight: 2
description:
This document provides API references for Open Match services.
---

View File

@ -51,7 +51,7 @@ func TestFastAndSlow(t *testing.T) {
for count := 0; true; count++ {
if v := <-slow; v == "3" {
if count > 1 {
t.Error("Expected to recieve at most 1 other value on slow before recieving the latest value.")
t.Error("Expected to receive at most 1 other value on slow before receiving the latest value.")
}
break
}

View File

@ -81,7 +81,7 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
update(s)
// See https://open-match.dev/site/docs/guides/api/
conn, err := grpc.Dial("om-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
conn, err := grpc.Dial("open-match-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
if err != nil {
panic(err)
}

View File

@ -68,7 +68,7 @@ func run(ds *components.DemoShared) {
ds.Update(s)
// See https://open-match.dev/site/docs/guides/api/
conn, err := grpc.Dial("om-backend.open-match.svc.cluster.local:50505", grpc.WithInsecure())
conn, err := grpc.Dial("open-match-backend.open-match.svc.cluster.local:50505", grpc.WithInsecure())
if err != nil {
panic(err)
}

View File

@ -0,0 +1,24 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM open-match-base-build as builder
WORKDIR /go/src/open-match.dev/open-match/examples/functions/golang/backfill
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o matchfunction .
FROM gcr.io/distroless/static:nonroot
WORKDIR /app/
COPY --from=builder --chown=nonroot /go/src/open-match.dev/open-match/examples/functions/golang/backfill/matchfunction /app/
ENTRYPOINT ["/app/matchfunction"]

View File

@ -0,0 +1,33 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main defines a sample match function that uses the GRPC harness to set up
// the matchmaking function as a service. This sample is a reference
// to demonstrate the usage of the GRPC harness and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package main
import (
"open-match.dev/open-match/examples/functions/golang/backfill/mmf"
)
const (
queryServiceAddr = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
)
func main() {
mmf.Start(queryServiceAddr, serverPort)
}

View File

@ -0,0 +1,297 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mmf provides a sample match function that uses the GRPC harness to set up 1v1 matches.
// This sample is a reference to demonstrate the usage of backfill and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package mmf
import (
"fmt"
"time"
"log"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/grpc"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
const (
playersPerMatch = 2
openSlotsKey = "open-slots"
matchName = "backfill-matchfunction"
)
// matchFunctionService implements pb.MatchFunctionServer, the server interface
// generated from the protobuf definition.
type matchFunctionService struct {
grpc *grpc.Server
queryServiceClient pb.QueryServiceClient
port int
}
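// Run queries tickets and backfills for every pool in the profile, builds match
// proposals from them, and streams the proposals back to Open Match.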
func (s *matchFunctionService) Run(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
log.Printf("Generating proposals for function %v", req.GetProfile().GetName())
var proposals []*pb.Match
profile := req.GetProfile()
pools := profile.GetPools()
for _, p := range pools {
tickets, err := matchfunction.QueryPool(stream.Context(), s.queryServiceClient, p)
if err != nil {
log.Printf("Failed to query tickets for the given pool, got %s", err.Error())
return err
}
backfills, err := matchfunction.QueryBackfillPool(stream.Context(), s.queryServiceClient, p)
if err != nil {
log.Printf("Failed to query backfills for the given pool, got %s", err.Error())
return err
}
matches, err := makeMatches(profile, p, tickets, backfills)
if err != nil {
log.Printf("Failed to generate matches, got %s", err.Error())
return err
}
proposals = append(proposals, matches...)
}
log.Printf("Streaming %v proposals to Open Match", len(proposals))
// Stream the generated proposals back to Open Match.
for _, proposal := range proposals {
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
log.Printf("Failed to stream proposals to Open Match, got %s", err.Error())
return err
}
}
return nil
}
// makeMatches handles backfills first, then makes full matches, and finally
// makes a match with a new backfill if any tickets are left.
func makeMatches(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, backfills []*pb.Backfill) ([]*pb.Match, error) {
var matches []*pb.Match
newMatches, remainingTickets, err := handleBackfills(profile, tickets, backfills, len(matches))
if err != nil {
return nil, err
}
matches = append(matches, newMatches...)
newMatches, remainingTickets = makeFullMatches(profile, remainingTickets, len(matches))
matches = append(matches, newMatches...)
if len(remainingTickets) > 0 {
match, err := makeMatchWithBackfill(profile, pool, remainingTickets, len(matches))
if err != nil {
return nil, err
}
matches = append(matches, match)
}
return matches, nil
}
// handleBackfills looks at each backfill's openSlots (the number of tickets required),
// takes that many tickets, decreases openSlots on the backfill, and makes a match
// with the updated backfill and the associated tickets.
func handleBackfills(profile *pb.MatchProfile, tickets []*pb.Ticket, backfills []*pb.Backfill, lastMatchId int) ([]*pb.Match, []*pb.Ticket, error) {
matchId := lastMatchId
var matches []*pb.Match
for _, b := range backfills {
openSlots, err := getOpenSlots(b)
if err != nil {
return nil, tickets, err
}
var matchTickets []*pb.Ticket
for openSlots > 0 && len(tickets) > 0 {
matchTickets = append(matchTickets, tickets[0])
tickets = tickets[1:]
openSlots--
}
if len(matchTickets) > 0 {
err := setOpenSlots(b, openSlots)
if err != nil {
return nil, tickets, err
}
matchId++
match := newMatch(matchId, profile.Name, matchTickets, b)
matches = append(matches, &match)
}
}
return matches, tickets, nil
}
// makeMatchWithBackfill makes a partially filled match and creates a backfill
// for it with openSlots = playersPerMatch-len(tickets).
func makeMatchWithBackfill(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, lastMatchId int) (*pb.Match, error) {
if len(tickets) == 0 {
return nil, fmt.Errorf("tickets are required")
}
if len(tickets) >= playersPerMatch {
return nil, fmt.Errorf("too many tickets")
}
matchId := lastMatchId
searchFields := newSearchFields(pool)
backfill, err := newBackfill(searchFields, playersPerMatch-len(tickets))
if err != nil {
return nil, err
}
matchId++
match := newMatch(matchId, profile.Name, tickets, backfill)
// Indicates that this is a new match, so a new game server should be allocated for it.
match.AllocateGameserver = true
return &match, nil
}
// makeFullMatches makes full matches without a backfill.
func makeFullMatches(profile *pb.MatchProfile, tickets []*pb.Ticket, lastMatchId int) ([]*pb.Match, []*pb.Ticket) {
matchId := lastMatchId
var matches []*pb.Match
// Consume tickets in groups of playersPerMatch until fewer than a full match remain.
for len(tickets) >= playersPerMatch {
matchId++
match := newMatch(matchId, profile.Name, tickets[:playersPerMatch], nil)
matches = append(matches, &match)
tickets = tickets[playersPerMatch:]
}
return matches, tickets
}
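// For example, with playersPerMatch = 2 and tickets [t1 t2 t3 t4 t5], this returns two full
// matches, (t1,t2) and (t3,t4), plus the unmatched [t5], which makeMatches then turns into
// a match with a new backfill.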
// newSearchFields creates search fields based on the pool's search criteria. This is just an example of how it can be done.
func newSearchFields(pool *pb.Pool) *pb.SearchFields {
searchFields := pb.SearchFields{}
rangeFilters := pool.GetDoubleRangeFilters()
if rangeFilters != nil {
doubleArgs := make(map[string]float64)
for _, f := range rangeFilters {
doubleArgs[f.DoubleArg] = (f.Max + f.Min) / 2 // midpoint of the range, so the value satisfies the filter
}
if len(doubleArgs) > 0 {
searchFields.DoubleArgs = doubleArgs
}
}
stringFilters := pool.GetStringEqualsFilters()
if stringFilters != nil {
stringArgs := make(map[string]string)
for _, f := range stringFilters {
stringArgs[f.StringArg] = f.Value
}
if len(stringArgs) > 0 {
searchFields.StringArgs = stringArgs
}
}
tagFilters := pool.GetTagPresentFilters()
if tagFilters != nil {
tags := make([]string, 0, len(tagFilters)) // zero length so append does not leave empty entries
for _, f := range tagFilters {
tags = append(tags, f.Tag)
}
if len(tags) > 0 {
searchFields.Tags = tags
}
}
return &searchFields
}
func newBackfill(searchFields *pb.SearchFields, openSlots int) (*pb.Backfill, error) {
b := pb.Backfill{
SearchFields: searchFields,
Generation: 0,
CreateTime: ptypes.TimestampNow(),
}
err := setOpenSlots(&b, int32(openSlots))
return &b, err
}
func newMatch(num int, profile string, tickets []*pb.Ticket, b *pb.Backfill) pb.Match {
t := time.Now().Format("2006-01-02T15:04:05.00")
return pb.Match{
MatchId: fmt.Sprintf("profile-%s-time-%s-num-%d", matchName, t, num),
MatchProfile: profile,
MatchFunction: matchName,
Tickets: tickets,
Backfill: b,
}
}
func setOpenSlots(b *pb.Backfill, val int32) error {
if b.Extensions == nil {
b.Extensions = make(map[string]*any.Any)
}
any, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: val})
if err != nil {
return err
}
b.Extensions[openSlotsKey] = any
return nil
}
func getOpenSlots(b *pb.Backfill) (int32, error) {
if b == nil {
return 0, fmt.Errorf("backfill is nil")
}
if b.Extensions != nil {
if any, ok := b.Extensions[openSlotsKey]; ok {
var val wrappers.Int32Value
err := ptypes.UnmarshalAny(any, &val)
if err != nil {
return 0, err
}
return val.Value, nil
}
}
return playersPerMatch, nil
}
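// Round-trip sketch for the open-slots extension (illustrative values):
//
//	b, _ := newBackfill(&pb.SearchFields{}, 2) // stores Int32Value{Value: 2} under openSlotsKey
//	n, _ := getOpenSlots(b)                    // n == 2
//	_ = setOpenSlots(b, n-1)                   // one slot filled
//	n, _ = getOpenSlots(b)                     // n == 1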

View File

@ -0,0 +1,142 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mmf
import (
"testing"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/stretchr/testify/require"
"open-match.dev/open-match/pkg/pb"
)
func TestHandleBackfills(t *testing.T) {
for _, tc := range []struct {
name string
tickets []*pb.Ticket
backfills []*pb.Backfill
lastMatchId int
expectedMatchLen int
expectedTicketLen int
expectedOpenSlots int32
expectedErr bool
}{
{name: "returns no matches when no backfills specified", expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns no matches when no tickets specified", expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns a match with open slots decreased", tickets: []*pb.Ticket{{Id: "1"}}, backfills: []*pb.Backfill{withOpenSlots(1)}, expectedMatchLen: 1, expectedTicketLen: 0, expectedOpenSlots: playersPerMatch - 2},
} {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
profile := pb.MatchProfile{Name: "matchProfile"}
matches, tickets, err := handleBackfills(&profile, testCase.tickets, testCase.backfills, testCase.lastMatchId)
require.Equal(t, testCase.expectedErr, err != nil)
require.Equal(t, testCase.expectedTicketLen, len(tickets))
if err != nil {
require.Equal(t, 0, len(matches))
} else {
for _, m := range matches {
require.NotNil(t, m.Backfill)
openSlots, err := getOpenSlots(m.Backfill)
require.NoError(t, err)
require.Equal(t, testCase.expectedOpenSlots, openSlots)
}
}
})
}
}
func TestMakeMatchWithBackfill(t *testing.T) {
for _, testCase := range []struct {
name string
tickets []*pb.Ticket
lastMatchId int
expectedOpenSlots int32
expectedErr bool
}{
{name: "returns an error when length of tickets is greater then playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}, {Id: "5"}}, expectedErr: true},
{name: "returns an error when length of tickets is equal to playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}}, expectedErr: true},
{name: "returns an error when no tickets are provided", expectedErr: true},
{name: "returns a match with backfill", tickets: []*pb.Ticket{{Id: "1"}}, expectedOpenSlots: playersPerMatch - 1},
} {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
pool := pb.Pool{}
profile := pb.MatchProfile{Name: "matchProfile"}
match, err := makeMatchWithBackfill(&profile, &pool, testCase.tickets, testCase.lastMatchId)
require.Equal(t, testCase.expectedErr, err != nil)
if err == nil {
require.NotNil(t, match)
require.NotNil(t, match.Backfill)
require.True(t, match.AllocateGameserver)
require.Equal(t, "", match.Backfill.Id)
openSlots, err := getOpenSlots(match.Backfill)
require.Nil(t, err)
require.Equal(t, testCase.expectedOpenSlots, openSlots)
}
})
}
}
func TestMakeFullMatches(t *testing.T) {
for _, testCase := range []struct {
name string
tickets []*pb.Ticket
lastMatchId int
expectedMatchLen int
expectedTicketLen int
}{
{name: "returns no matches when there are no tickets", tickets: []*pb.Ticket{}, expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns no matches when length of tickets is less then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}}, expectedMatchLen: 0, expectedTicketLen: 1},
{name: "returns a match when length of tickets is greater then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}}, expectedMatchLen: 1, expectedTicketLen: 0},
} {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
profile := pb.MatchProfile{Name: "matchProfile"}
matches, tickets := makeFullMatches(&profile, testCase.tickets, testCase.lastMatchId)
require.Equal(t, testCase.expectedMatchLen, len(matches))
require.Equal(t, testCase.expectedTicketLen, len(tickets))
for _, m := range matches {
require.Nil(t, m.Backfill)
require.Equal(t, playersPerMatch, len(m.Tickets))
}
})
}
}
func withOpenSlots(openSlots int) *pb.Backfill {
val, err := ptypes.MarshalAny(&wrappers.Int32Value{Value: int32(openSlots)})
if err != nil {
panic(err)
}
return &pb.Backfill{
Extensions: map[string]*any.Any{
openSlotsKey: val,
},
}
}

View File

@ -0,0 +1,59 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mmf provides a sample match function that uses the gRPC harness to set up 1v1 matches.
// This sample is a reference to demonstrate the usage of backfill and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package mmf
import (
"fmt"
"log"
"net"
"google.golang.org/grpc"
"open-match.dev/open-match/pkg/pb"
)
func Start(queryServiceAddr string, serverPort int) {
// Connect to QueryService.
conn, err := grpc.Dial(queryServiceAddr, grpc.WithInsecure())
if err != nil {
log.Fatalf("Failed to connect to Open Match, got %s", err.Error())
}
defer conn.Close()
mmfService := matchFunctionService{
queryServiceClient: pb.NewQueryServiceClient(conn),
}
// Create and host a new gRPC service on the configured port.
server := grpc.NewServer()
pb.RegisterMatchFunctionServer(server, &mmfService)
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", serverPort))
if err != nil {
log.Fatalf("TCP net listener initialization failed for port %v, got %s", serverPort, err.Error())
}
log.Printf("TCP net listener initialized for port %v", serverPort)
err = server.Serve(ln)
if err != nil {
log.Fatalf("gRPC serve failed, got %s", err.Error())
}
}
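// A minimal entry point wiring this harness up might look as follows (a sketch; the main
// package below supplies the address and port as constants):
//
//	func main() {
//		mmf.Start("open-match-query.open-match.svc.cluster.local:50503", 50502)
//	}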

View File

@ -24,8 +24,8 @@ import (
)
const (
queryServiceAddr = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
queryServiceAddr = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
)
func main() {

View File

@ -19,11 +19,11 @@ import (
"open-match.dev/open-match/pkg/pb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMakeMatchesDeduplicate(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
poolNameToTickets := map[string][]*pb.Ticket{
"pool1": {{Id: "1"}},
@ -31,12 +31,12 @@ func TestMakeMatchesDeduplicate(t *testing.T) {
}
matches, err := makeMatches(poolNameToTickets)
assert.Nil(err)
assert.Equal(len(matches), 0)
require.Nil(err)
require.Equal(len(matches), 0)
}
func TestMakeMatches(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
poolNameToTickets := map[string][]*pb.Ticket{
"pool1": {{Id: "1"}, {Id: "2"}, {Id: "3"}},
@ -45,11 +45,11 @@ func TestMakeMatches(t *testing.T) {
}
matches, err := makeMatches(poolNameToTickets)
assert.Nil(err)
assert.Equal(len(matches), 3)
require.Nil(err)
require.Equal(len(matches), 3)
for _, match := range matches {
assert.Equal(2, len(match.Tickets))
assert.Equal(matchName, match.MatchFunction)
require.Equal(2, len(match.Tickets))
require.Equal(matchName, match.MatchFunction)
}
}

View File

@ -39,7 +39,7 @@ var (
func Run() {
activeScenario := scenarios.ActiveScenario
conn, err := grpc.Dial("om-query.open-match.svc.cluster.local:50503", utilTesting.NewGRPCDialOptions(logger)...)
conn, err := grpc.Dial("open-match-query.open-match.svc.cluster.local:50503", utilTesting.NewGRPCDialOptions(logger)...)
if err != nil {
logger.Fatalf("Failed to connect to Open Match, got %v", err)
}

View File

@ -101,7 +101,7 @@ func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[
func (b *BattleRoyalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and recieving at the
// TODO: once the evaluator client supports sending and receiving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}

View File

@ -71,7 +71,7 @@ func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[s
func (_ *FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and recieving at the
// TODO: once the evaluator client supports sending and receiving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}

View File

@ -28,7 +28,7 @@ import (
)
var (
queryServiceAddress = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
queryServiceAddress = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
logger = logrus.WithFields(logrus.Fields{
"app": "scale",

77
go.mod
View File

@ -18,56 +18,49 @@ module open-match.dev/open-match
go 1.14
require (
cloud.google.com/go v0.47.0 // indirect
contrib.go.opencensus.io/exporter/jaeger v0.1.0
contrib.go.opencensus.io/exporter/ocagent v0.6.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0
contrib.go.opencensus.io/exporter/stackdriver v0.12.8
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71
contrib.go.opencensus.io/exporter/jaeger v0.2.1
contrib.go.opencensus.io/exporter/ocagent v0.7.0
contrib.go.opencensus.io/exporter/prometheus v0.2.0
contrib.go.opencensus.io/exporter/stackdriver v0.13.4
github.com/Bose/minisentinel v0.0.0-20200130220412-917c5a9223bb
github.com/TV4/logrus-stackdriver-formatter v0.1.0
github.com/alicebob/miniredis/v2 v2.11.0
github.com/apache/thrift v0.13.0 // indirect
github.com/aws/aws-sdk-go v1.25.27 // indirect
github.com/alicebob/miniredis/v2 v2.14.1
github.com/aws/aws-sdk-go v1.35.26 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/fsnotify/fsnotify v1.4.7
github.com/fsnotify/fsnotify v1.4.9
github.com/go-redsync/redsync/v4 v4.0.3
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
github.com/golang/protobuf v1.3.2
github.com/golang/protobuf v1.4.3
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible
github.com/googleapis/gnostic v0.3.1 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
github.com/grpc-ecosystem/grpc-gateway v1.12.0
github.com/imdario/mergo v0.3.8 // indirect
github.com/json-iterator/go v1.1.8 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/pelletier/go-toml v1.6.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.2.1
github.com/pseudomuto/protoc-gen-doc v1.3.2 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/imdario/mergo v0.3.11 // indirect
github.com/pelletier/go-toml v1.8.1 // indirect
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.8.0
github.com/rs/xid v1.2.1
github.com/sirupsen/logrus v1.4.2
github.com/spf13/afero v1.2.1 // indirect
github.com/sirupsen/logrus v1.7.0
github.com/spf13/afero v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.5.0
github.com/stretchr/testify v1.4.0
go.opencensus.io v0.22.1
golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 // indirect
golang.org/x/net v0.0.0-20191105084925-a882066a44e0
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
google.golang.org/api v0.13.0 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6
google.golang.org/grpc v1.25.0
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.6.1
go.opencensus.io v0.22.5
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd // indirect
golang.org/x/text v0.3.4 // indirect
google.golang.org/api v0.35.0 // indirect
google.golang.org/genproto v0.0.0-20201112120144-2985b7af83de
google.golang.org/grpc v1.33.2
google.golang.org/protobuf v1.25.0
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.2.5 // indirect
k8s.io/api v0.0.0-20191004102255-dacd7df5a50b // kubernetes-1.13.12
k8s.io/apimachinery v0.0.0-20191004074956-01f8b7d1121a // kubernetes-1.13.12
k8s.io/client-go v0.0.0-20191004102537-eb5b9a8cfde7 // kubernetes-1.13.12
k8s.io/api v0.0.0-20191004102349-159aefb8556b // kubernetes-1.14.10
k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689 // kubernetes-1.14.10
k8s.io/client-go v11.0.1-0.20191029005444-8e4128053008+incompatible // kubernetes-1.14.10
k8s.io/klog v1.0.0 // indirect
sigs.k8s.io/yaml v1.1.0 // indirect
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

655
go.sum

File diff suppressed because it is too large

View File

@ -13,13 +13,13 @@
# limitations under the License.
apiVersion: v2
appVersion: "1.0.0"
version: 1.0.0
appVersion: "0.0.0-dev"
version: 0.0.0-dev
name: open-match
dependencies:
- name: redis
version: 9.5.0
repository: https://kubernetes-charts.storage.googleapis.com/
version: 12.3.3
repository: https://charts.bitnami.com/bitnami
condition: open-match-core.redis.enabled
- name: open-match-telemetry
version: 0.0.0-dev

View File

@ -0,0 +1,20 @@
{*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*}
{{/* vim: set filetype=mustache: */}}
{{- define "openmatchcustomize.function.hostName" -}}
{{- .Values.function.hostName | default (printf "%s-function" (include "openmatch.fullname" . ) ) -}}
{{- end -}}

View File

@ -18,7 +18,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.evaluator.hostName }}
name: {{ include "openmatch.evaluator.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -46,20 +46,20 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ .Values.evaluator.hostName }}
name: {{ include "openmatch.evaluator.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ .Values.evaluator.hostName }}
name: {{ include "openmatch.evaluator.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.evaluator.hostName }}
name: {{ include "openmatch.evaluator.hostName" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "openmatch.name" . }}
@ -82,12 +82,13 @@ spec:
component: evaluator
release: {{ .Release.Name }}
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.evaluatorConfigs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.evaluatorConfigs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.evaluator.hostName }}
- name: {{ include "openmatch.evaluator.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.evaluatorConfigs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -18,7 +18,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.function.hostName }}
name: {{ include "openmatchcustomize.function.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -46,20 +46,20 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ .Values.function.hostName }}
name: {{ include "openmatchcustomize.function.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ .Values.function.hostName }}
name: {{ include "openmatchcustomize.function.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.function.hostName }}
name: {{ include "openmatchcustomize.function.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -83,12 +83,13 @@ spec:
component: matchfunction
release: {{ .Release.Name }}
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.mmfConfigs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.mmfConfigs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.function.hostName }}
- name: {{ include "openmatchcustomize.function.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.mmfConfigs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -35,11 +35,13 @@ evaluatorConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: om-configmap-default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
customize:
volumeName: om-config-volume-override
mountPath: /app/config/override
configName: om-configmap-override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
mmfConfigs:
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,
@ -48,8 +50,10 @@ mmfConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: om-configmap-default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
customize:
volumeName: om-config-volume-override
mountPath: /app/config/override
configName: om-configmap-override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'

View File

@ -0,0 +1,42 @@
{*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*}
{{/* vim: set filetype=mustache: */}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "openmatchscale.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{- define "openmatchscale.scaleBackend.hostName" -}}
{{- .Values.scaleBackend.hostName | default (printf "%s-backend" (include "openmatchscale.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatchscale.scaleFrontend.hostName" -}}
{{- .Values.scaleFrontend.hostName | default (printf "%s-frontend" (include "openmatchscale.fullname" . ) ) -}}
{{- end -}}

View File

@ -15,7 +15,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.scaleBackend.hostName }}
name: {{ include "openmatchscale.scaleBackend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -34,7 +34,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.scaleBackend.hostName }}
name: {{ include "openmatchscale.scaleBackend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -59,11 +59,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.scaleBackend.hostName }}
- name: {{ include "openmatchscale.scaleBackend.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -15,7 +15,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.scaleFrontend.hostName }}
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -34,7 +34,7 @@ spec:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ .Values.scaleFrontend.hostName }}
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -59,11 +59,11 @@ spec:
release: {{ .Release.Name }}
spec:
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.scaleFrontend.hostName }}
- name: {{ include "openmatchscale.scaleFrontend.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: open-match-scale-dashboard
name: {{ include "openmatchscale.fullname" . }}-dashboard
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"

View File

@ -13,13 +13,13 @@
# limitations under the License.
scaleFrontend:
hostName: om-scale-frontend
hostName:
httpPort: 51509
replicas: 1
image: openmatch-scale-frontend
scaleBackend:
hostName: om-scale-backend
hostName:
httpPort: 51509
replicas: 1
image: openmatch-scale-backend
@ -28,8 +28,10 @@ configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: om-configmap-default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
configName: om-configmap-override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'

View File

@ -20,14 +20,14 @@ version: 0.0.0-dev
dependencies:
- name: prometheus
version: 9.2.0
repository: https://kubernetes-charts.storage.googleapis.com/
repository: https://charts.helm.sh/stable
condition: global.telemetry.prometheus.enabled,prometheus.enabled
- name: grafana
version: 4.0.1
repository: https://kubernetes-charts.storage.googleapis.com/
repository: https://charts.helm.sh/stable
condition: global.telemetry.grafana.enabled,grafana.enabled
- name: jaeger
version: 0.13.3
repository: https://kubernetes-charts-incubator.storage.googleapis.com/
repository: https://charts.helm.sh/stable
condition: global.telemetry.jaeger.enabled,jaeger.enabled

View File

@ -62,7 +62,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (pod_name) (\n sum(\n rate(container_cpu_usage_seconds_total{pod_name=~\"om-.*\", container_name!=\"POD\"}[5m])\n ) by (pod_name, container_name) \n \n /\n \n sum(\n container_spec_cpu_quota{pod_name=~\"om-.*\", container_name!=\"POD\"} / container_spec_cpu_period{pod_name=~\"om-.*\", container_name!=\"POD\"}\n ) by (pod_name, container_name) \n \n * \n \n 100\n)",
"expr": "avg by (pod_name) (\n\nsum(\n rate(container_cpu_usage_seconds_total{container_name!=\"POD\"}[5m]) * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\"))\n) by (pod_name, container_name)\n\n/\n\nsum(\n (container_spec_cpu_quota{container_name!=\"POD\"} * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")))\n /\n (container_spec_cpu_period{container_name!=\"POD\"} * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"open-match\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\")))\n) by (pod_name, container_name)\n\n*\n\n100\n)\n",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{pod_name}}",
@ -155,7 +155,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (go_goroutines{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component) (go_goroutines{app=~\"open-match\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -256,7 +256,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component,app) (process_resident_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component,app) (process_resident_memory_bytes{app=~\"open-match\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - resident",
@ -265,7 +265,7 @@
"step": 4
},
{
"expr": "avg by (component,app) (process_virtual_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component,app) (process_virtual_memory_bytes{app=~\"open-match\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - virtual",
@ -365,7 +365,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(process_resident_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"expr": "avg by (component) (deriv(process_resident_memory_bytes{app=~\"open-match\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - resident",
@ -374,7 +374,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(process_virtual_memory_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"expr": "avg by (component) (deriv(process_virtual_memory_bytes{app=~\"open-match\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - virtual",
@ -475,7 +475,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (go_memstats_alloc_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component) (go_memstats_alloc_bytes{app=~\"open-match\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - bytes allocated",
@ -484,7 +484,7 @@
"step": 4
},
{
"expr": "avg by (component) (rate(go_memstats_alloc_bytes_total{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"expr": "avg by (component) (rate(go_memstats_alloc_bytes_total{app=~\"open-match\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - alloc rate",
@ -493,7 +493,7 @@
"step": 4
},
{
"expr": "avg by (component) (go_memstats_stack_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component) (go_memstats_stack_inuse_bytes{app=~\"open-match\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - stack inuse",
@ -502,7 +502,7 @@
"step": 4
},
{
"expr": "avg by (component) (go_memstats_heap_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component) (go_memstats_heap_inuse_bytes{app=~\"open-match\"})",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -604,7 +604,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(go_memstats_alloc_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_alloc_bytes{app=~\"open-match\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - bytes allocated",
@ -613,7 +613,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(go_memstats_stack_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_stack_inuse_bytes{app=~\"open-match\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}} - stack inuse",
@ -622,7 +622,7 @@
"step": 4
},
{
"expr": "avg by (component) (deriv(go_memstats_heap_inuse_bytes{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"expr": "avg by (component) (deriv(go_memstats_heap_inuse_bytes{app=~\"open-match\"}[$interval]))",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
@ -719,7 +719,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (process_open_fds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component) (process_open_fds{app=~\"open-match\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -815,7 +815,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component) (deriv(process_open_fds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"}[$interval]))",
"expr": "avg by (component) (deriv(process_open_fds{app=~\"open-match\"}[$interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}",
@ -911,7 +911,7 @@
"steppedLine": false,
"targets": [
{
"expr": "avg by (component, quantile) (go_gc_duration_seconds{app=~\"open-match\", kubernetes_pod_name=~\"om-.*\"})",
"expr": "avg by (component, quantile) (go_gc_duration_seconds{app=~\"open-match\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{component}}: {{quantile}}",

View File

@ -348,14 +348,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{pod_name=~\"om-redis.*\", name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m]) * on (pod_name) group_left(label_app) max by (pod_name, label_app) (label_replace(kube_pod_labels{label_app=\"redis\"}, \"pod_name\", \"$1\", \"pod\", \"(.*)\"))) by (pod_name)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{pod_name}} usage",
"refId": "A"
},
{
"expr": "sum(kube_pod_container_resource_limits_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
"expr": "sum(kube_pod_container_resource_limits_cpu_cores * on (pod) group_left(label_app) max by (pod, label_app) (kube_pod_labels{label_app=\"redis\"})) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@ -363,7 +363,7 @@
"refId": "B"
},
{
"expr": "sum(kube_pod_container_resource_requests_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
"expr": "sum(kube_pod_container_resource_requests_cpu_cores * on (pod) group_left(label_app) max by (pod, label_app) (kube_pod_labels{label_app=\"redis\"})) by (pod)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "request",

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: open-match-dashboards
name: {{ include "openmatch.fullname" . }}-dashboards
labels:
grafana_dashboard: "1"
data:

View File

@ -0,0 +1,31 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.global.telemetry.grafana.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.fullname" . }}-datasource
labels:
grafana_datasource: "1"
data:
datasource.yaml: |-
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: {{ tpl .Values.global.telemetry.grafana.prometheusServer . }}
access: proxy
isDefault: true
{{- end }}

View File

@ -142,17 +142,10 @@ grafana:
notifiers: {}
sidecar:
dashboards:
enabled: true
enabled: true
datasources:
enabled: true
plugins: grafana-piechart-panel
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://open-match-prometheus-server.{{ .Release.Namespace }}.svc.cluster.local:80/
access: proxy
isDefault: true
jaeger:
enabled: true

View File

@ -22,6 +22,26 @@ Expand the name of the chart.
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Instead of .Chart.Name, we hard-code "open-match" as we need to call this from subcharts, but get the
same result as if called from this chart.
*/}}
{{- define "openmatch.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default "open-match" .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Render chart metadata labels: "chart", "heritage" unless "openmatch.noChartMeta" is set.
*/}}
@ -57,7 +77,7 @@ resources:
{{- range $configIndex, $configValues := .configs }}
- name: {{ $configValues.volumeName }}
configMap:
name: {{ $configValues.configName }}
name: {{ tpl $configValues.configName $ }}
{{- end }}
{{- end -}}
@ -74,10 +94,10 @@ resources:
{{- if .Values.global.tls.enabled }}
- name: tls-server-volume
secret:
secretName: om-tls-server
secretName: {{ include "openmatch.fullname" . }}-tls-server
- name: root-ca-volume
secret:
secretName: om-tls-rootca
secretName: {{ include "openmatch.fullname" . }}-tls-rootca
{{- end -}}
{{- end -}}
@ -92,7 +112,7 @@ resources:
{{- if .Values.redis.usePassword }}
- name: redis-password
secret:
secretName: {{ .Values.redis.fullnameOverride }}
secretName: {{ include "call-nested" (list . "redis" "redis.fullname") }}
{{- end -}}
{{- end -}}
@ -135,3 +155,72 @@ minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.serviceAccount.name" -}}
{{- .Values.global.kubernetes.serviceAccount | default (printf "%s-unprivileged-service" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.swaggerui.hostName" -}}
{{- .Values.swaggerui.hostName | default (printf "%s-swaggerui" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.query.hostName" -}}
{{- .Values.query.hostName | default (printf "%s-query" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.frontend.hostName" -}}
{{- .Values.frontend.hostName | default (printf "%s-frontend" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.backend.hostName" -}}
{{- .Values.backend.hostName | default (printf "%s-backend" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.synchronizer.hostName" -}}
{{- .Values.synchronizer.hostName | default (printf "%s-synchronizer" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.evaluator.hostName" -}}
{{- .Values.evaluator.hostName | default (printf "%s-evaluator" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.configmap.default" -}}
{{- printf "%s-configmap-default" (include "openmatch.fullname" . ) -}}
{{- end -}}
{{- define "openmatch.configmap.override" -}}
{{- printf "%s-configmap-override" (include "openmatch.fullname" . ) -}}
{{- end -}}
{{- define "openmatch.jaeger.agent" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.agent.name") }}:6831
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "openmatch.jaeger.collector" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
http://{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.collector.name") }}:14268/api/traces
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Call templates from sub-charts in a synthesized context, workaround for https://github.com/helm/helm/issues/3920
Mainly useful for things like `{{ include "call-nested" (list . "redis" "redis.fullname") }}`
https://github.com/helm/helm/issues/4535#issuecomment-416022809
https://github.com/helm/helm/issues/4535#issuecomment-477778391
*/}}
{{- define "call-nested" }}
{{- $dot := index . 0 }}
{{- $subchart := index . 1 | splitList "." }}
{{- $template := index . 2 }}
{{- $values := $dot.Values }}
{{- range $subchart }}
{{- $values = index $values . }}
{{- end }}
{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }}
{{- end }}

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.backend.hostName }}
name: {{ include "openmatch.backend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ .Values.backend.hostName }}
name: {{ include "openmatch.backend.hostName" . }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ .Values.backend.hostName }}
name: {{ include "openmatch.backend.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.backend.hostName }}
name: {{ include "openmatch.backend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8}}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.backend.hostName }}
- name: {{ include "openmatch.backend.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.frontend.hostName }}
name: {{ include "openmatch.frontend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ .Values.frontend.hostName }}
name: {{ include "openmatch.frontend.hostName" . }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ .Values.frontend.hostName }}
name: {{ include "openmatch.frontend.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.frontend.hostName }}
name: {{ include "openmatch.frontend.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8}}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.frontend.hostName }}
- name: {{ include "openmatch.frontend.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: om-configmap-default
name: {{ include "openmatch.configmap.default" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -50,28 +50,28 @@ data:
api:
backend:
hostname: "{{ .Values.backend.hostName }}"
hostname: "{{ include "openmatch.backend.hostName" . }}"
grpcport: "{{ .Values.backend.grpcPort }}"
httpport: "{{ .Values.backend.httpPort }}"
frontend:
hostname: "{{ .Values.frontend.hostName }}"
hostname: "{{ include "openmatch.frontend.hostName" . }}"
grpcport: "{{ .Values.frontend.grpcPort }}"
httpport: "{{ .Values.frontend.httpPort }}"
query:
hostname: "{{ .Values.query.hostName }}"
hostname: "{{ include "openmatch.query.hostName" . }}"
grpcport: "{{ .Values.query.grpcPort }}"
httpport: "{{ .Values.query.httpPort }}"
synchronizer:
hostname: "{{ .Values.synchronizer.hostName }}"
hostname: "{{ include "openmatch.synchronizer.hostName" . }}"
grpcport: "{{ .Values.synchronizer.grpcPort }}"
httpport: "{{ .Values.synchronizer.httpPort }}"
swaggerui:
hostname: "{{ .Values.swaggerui.hostName }}"
hostname: "{{ include "openmatch.swaggerui.hostName" . }}"
httpport: "{{ .Values.swaggerui.httpPort }}"
# Configurations for api.test and api.scale are used for testing.
test:
hostname: "test"
hostname: "{{ include "openmatch.fullname" . }}-test"
grpcport: "50509"
httpport: "51509"
scale:
@ -90,11 +90,11 @@ data:
{{- if index .Values "redis" "sentinel" "enabled"}}
sentinelPort: {{ .Values.redis.sentinel.port }}
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
sentinelHostname: {{ .Values.redis.fullnameOverride }}
sentinelHostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
{{- else}}
# Open Match's default Redis setups
hostname: {{ .Values.redis.fullnameOverride }}-master.{{ .Release.Namespace }}.svc.cluster.local
hostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
port: {{ .Values.redis.redisPort }}
user: {{ .Values.redis.user }}
{{- end}}
@ -119,8 +119,13 @@ data:
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
jaeger:
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
{{- if .Values.global.telemetry.jaeger.enabled }}
agentEndpoint: "{{ tpl .Values.global.telemetry.jaeger.agentEndpoint . }}"
collectorEndpoint: "{{ tpl .Values.global.telemetry.jaeger.collectorEndpoint . }}"
{{- else }}
agentEndpoint: ""
collectorEndpoint: ""
{{- end }}
prometheus:
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"

View File

@ -16,7 +16,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: om-configmap-override
name: {{ include "openmatch.configmap.override" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,9 +40,10 @@ data:
assignedDeleteTimeout: {{ index .Values "open-match-core" "assignedDeleteTimeout" }}
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: {{ index .Values "open-match-core" "queryPageSize" }}
backfillLockTimeout: {{ index .Values "open-match-core" "backfillLockTimeout" }}
api:
evaluator:
hostname: "{{ .Values.evaluator.hostName }}"
hostname: "{{ include "openmatch.evaluator.hostName" . }}"
grpcport: "{{ .Values.evaluator.grpcPort }}"
httpport: "{{ .Values.evaluator.httpPort }}"
{{- end }}

View File

@ -14,11 +14,11 @@
{{- if index .Values "open-match-core" "enabled" }}
{{- if empty .Values.ci }}
# om-redis-podsecuritypolicy is the least restricted PSP used to create privileged pods to disable THP in host kernel.
# This is the least restricted PSP, used to create privileged pods to disable THP in the host kernel.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: om-redis-podsecuritypolicy
name: {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
@ -51,11 +51,11 @@ spec:
fsGroup:
rule: 'RunAsAny'
---
# om-core-podsecuritypolicy does not allow creating privileged pods and restrict binded pods to use the specified port ranges.
# This does not allow creating privileged pods and restricts bound pods to the specified port ranges.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: om-core-podsecuritypolicy
name: {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.query.hostName }}
name: {{ include "openmatch.query.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -44,19 +44,19 @@ spec:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ .Values.query.hostName }}
name: {{ include "openmatch.query.hostName" . }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ .Values.query.hostName }}
name: {{ include "openmatch.query.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.query.hostName }}
name: {{ include "openmatch.query.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -82,12 +82,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8 }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.query.hostName }}
- name: {{ include "openmatch.query.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -29,7 +29,7 @@ metadata:
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.global.kubernetes.serviceAccount }}
name: {{ include "openmatch.serviceAccount.name" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,28 +40,26 @@ automountServiceAccountToken: true
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: om-service-role
name: {{ include "openmatch.fullname" . }}-service-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-service-role to use om-core-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- om-core-podsecuritypolicy
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
verbs:
- use
---
# This applies om-service-role to the open-match unprivileged service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: om-service-role-binding
name: {{ include "openmatch.fullname" . }}-service-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -73,34 +71,33 @@ subjects:
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: om-service-role
name: {{ include "openmatch.fullname" . }}-service-role
apiGroup: rbac.authorization.k8s.io
---
{{- if index .Values "open-match-core" "redis" "enabled" }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: om-redis-role
name: {{ include "openmatch.fullname" . }}-redis-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-redis-role to use om-redis-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- om-redis-podsecuritypolicy
- {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
verbs:
- use
---
# This applies om-redis role to the om-redis privileged service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: om-redis-role-binding
name: {{ include "openmatch.fullname" . }}-redis-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -108,10 +105,11 @@ metadata:
release: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Values.redis.serviceAccount.name }} # Redis service account
name: {{ include "call-nested" (list . "redis" "redis.serviceAccountName") }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: om-redis-role
name: {{ include "openmatch.fullname" . }}-redis-role
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.swaggerui.hostName }}
name: {{ include "openmatch.swaggerui.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -36,7 +36,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.swaggerui.hostName }}
name: {{ include "openmatch.swaggerui.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -61,11 +61,11 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.swaggerui.hostName }}
- name: {{ include "openmatch.swaggerui.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -16,7 +16,7 @@
kind: Service
apiVersion: v1
metadata:
name: {{ .Values.synchronizer.hostName }}
name: {{ include "openmatch.synchronizer.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,7 +40,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.synchronizer.hostName }}
name: {{ include "openmatch.synchronizer.hostName" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -66,12 +66,12 @@ spec:
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (dict "configs" .Values.configs) | nindent 8}}
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.configs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}
{{- include "openmatch.volumes.withredis" . | nindent 8 }}
serviceAccountName: {{ .Values.global.kubernetes.serviceAccount }}
serviceAccountName: {{ include "openmatch.serviceAccount.name" . }}
containers:
- name: {{ .Values.synchronizer.hostName }}
- name: {{ include "openmatch.synchronizer.hostName" . }}
volumeMounts:
{{- include "openmatch.volumemounts.configs" (dict "configs" .Values.configs) | nindent 10 }}
{{- include "openmatch.volumemounts.tls" . | nindent 10 }}

View File

@ -14,11 +14,10 @@
{{- if .Values.ci }}
# This applies om-test-role to the open-match-test-service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: om-test-role-binding
name: {{ include "openmatch.fullname" . }}-test-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -26,11 +25,11 @@ metadata:
release: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: open-match-test-service
name: {{ include "openmatch.fullname" . }}-test-service
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: om-test-role
name: {{ include "openmatch.fullname" . }}-test-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -17,23 +17,22 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: om-test-role
name: {{ include "openmatch.fullname" . }}-test-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
# Define om-test-role to use om-core-podsecuritypolicy
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- om-core-podsecuritypolicy
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
verbs:
- use
# Grant om-test-role get & list permission for k8s endpoints and pods resources
# Grant this role get & list permission for k8s endpoints and pods resources
# Required for e2e in-cluster testing.
- apiGroups:
- ""

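For reference, the get & list grant above exists so the in-cluster e2e test pod can discover Open Match pods and endpoints. A minimal client-go sketch of the kind of call this role must permit; the namespace and the ctx-taking List signature (client-go v0.18+) are assumptions of the example, not part of the chart:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Runs inside the cluster under the service account this role is bound to.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// "list pods" is one of the verbs the rule above must allow.
	pods, err := cs.CoreV1().Pods("open-match").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("pods visible to the test role:", len(pods.Items))
}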
View File

@ -14,11 +14,11 @@
{{- if .Values.ci }}
# Create a service account for open-match-test services.
# Create a service account for test services.
apiVersion: v1
kind: ServiceAccount
metadata:
name: open-match-test-service
name: {{ include "openmatch.fullname" . }}-test-service
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:

View File

@ -17,7 +17,7 @@
kind: Service
apiVersion: v1
metadata:
name: test
name: {{ include "openmatch.fullname" . }}-test
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -40,7 +40,7 @@ spec:
apiVersion: v1
kind: Pod
metadata:
name: test
name: {{ include "openmatch.fullname" . }}-test
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
@ -52,19 +52,19 @@ metadata:
spec:
# Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.
activeDeadlineSeconds: 900
serviceAccountName: open-match-test-service
serviceAccountName: {{ include "openmatch.fullname" . }}-test-service
automountServiceAccountToken: true
volumes:
- configMap:
defaultMode: 420
name: om-configmap-default
name: {{ include "openmatch.configmap.default" . }}
name: om-config-volume-default
- configMap:
defaultMode: 420
name: om-configmap-override
name: {{ include "openmatch.configmap.override" . }}
name: om-config-volume-override
containers:
- name: "test"
- name: {{ include "openmatch.fullname" . }}-test
volumeMounts:
- mountPath: /app/config/default
name: om-config-volume-default

View File

@ -17,7 +17,7 @@
apiVersion: v1
kind: Secret
metadata:
name: om-tls-rootca
name: {{ include "openmatch.fullname" . }}-tls-rootca
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
@ -31,9 +31,9 @@ data:
apiVersion: v1
kind: Secret
metadata:
name: om-tls-server
name: {{ include "openmatch.fullname" . }}-tls-server
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 2 }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
component: tls

View File

@ -23,7 +23,7 @@
# Begins the configuration section for `query` component in Open Match.
# query:
#
# # Specifies om-query as the in-cluster domain name for the `query` service.
# # Override the default in-cluster domain name for the `query` service to om-query.
# hostName: om-query
#
# # Specifies the port for receiving RESTful HTTP requests in the `query` service.
@ -44,67 +44,68 @@
# # Specifies the image name to be used in a Kubernetes pod for `query` component.
# image: openmatch-query
swaggerui: &swaggerui
hostName: om-swaggerui
hostName:
httpPort: 51500
portType: ClusterIP
replicas: 1
image: openmatch-swaggerui
query: &query
hostName: om-query
hostName:
grpcPort: 50503
httpPort: 51503
portType: ClusterIP
replicas: 3
image: openmatch-query
frontend: &frontend
hostName: om-frontend
hostName:
grpcPort: 50504
httpPort: 51504
portType: ClusterIP
replicas: 3
image: openmatch-frontend
backend: &backend
hostName: om-backend
hostName:
grpcPort: 50505
httpPort: 51505
portType: ClusterIP
replicas: 3
image: openmatch-backend
synchronizer: &synchronizer
hostName: om-synchronizer
hostName:
grpcPort: 50506
httpPort: 51506
portType: ClusterIP
replicas: 1
image: openmatch-synchronizer
evaluator: &evaluator
hostName: om-evaluator
hostName:
grpcPort: 50508
httpPort: 51508
replicas: 3
function: &function
hostName: om-function
hostName:
grpcPort: 50502
httpPort: 51502
replicas: 3
# Specifies the location and name of the Open Match application-level config volumes.
# Used in template: `openmatch.volumemounts.configs` and `openmatch.volumes.configs` under `templates/_helpers.tpl` file.
configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: om-configmap-default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
configName: om-configmap-override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
# Override Redis settings
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
redis:
fullnameOverride: om-redis
redisPort: 6379
usePassword: false
usePasswordFile: false
@ -133,7 +134,6 @@ redis:
slaveCount: 3
serviceAccount:
create: true
name: open-match-redis-service
slave:
persistence:
enabled: false
@ -174,7 +174,7 @@ open-match-core:
enabled: true
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after a match function has started before it will be canceled,
@ -188,6 +188,8 @@ open-match-core:
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
# Duration for redis locks to expire.
backfillLockTimeout: 1m
redis:
enabled: true
@ -195,7 +197,7 @@ open-match-core:
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
user:
pool:
maxIdle: 500
maxActive: 500
@ -208,8 +210,6 @@ open-match-core:
open-match-scale:
# Switch the value between true/false to turn on/off this subchart
enabled: false
frontend: *frontend
backend: *backend
# Controls if users need to install the monitoring tools in Open Match.
open-match-telemetry:
@ -222,7 +222,6 @@ open-match-customize:
enabled: false
evaluator: *evaluator
function: *function
query: *query
# You can override the evaluator/mmf image
# evaluator:
# image: [YOUR_EVALUATOR_IMAGE]
@ -249,8 +248,8 @@ global:
limits:
memory: 3Gi
cpu: 2
# Defines a service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount: open-match-unprivileged-service
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -272,10 +271,9 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 1.0.0
tag: 0.0.0-dev
pullPolicy: Always
# Expose the telemetry configurations to all subcharts because prometheus, for example,
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
@ -286,8 +284,8 @@ global:
enabled: true
jaeger:
enabled: false
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
prometheus:
enabled: false
endpoint: "/metrics"

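The new backfillLockTimeout entry feeds the Redis lock expiration that this release makes configurable. A minimal sketch of reading it, assuming the setting surfaces under a top-level backfillLockTimeout key once the om-configmap volumes are merged (the key path is an assumption of this example):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

func main() {
	cfg := viper.New()
	// Mirror the chart default shown above; in a pod the real value arrives
	// through the mounted config maps instead.
	cfg.SetDefault("backfillLockTimeout", "1m")
	ttl := cfg.GetDuration("backfillLockTimeout") // hypothetical key path
	fmt.Println(ttl == time.Minute)               // true
}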
View File

@ -23,7 +23,7 @@
# Begins the configuration section for `query` component in Open Match.
# query:
#
# # Specifies om-query as the in-cluster domain name for the `query` service.
# # Override the default in-cluster domain name for the `query` service to om-query.
# hostName: om-query
#
# # Specifies the port for receiving RESTful HTTP requests in the `query` service.
@ -44,46 +44,46 @@
# # Specifies the image name to be used in a Kubernetes pod for `query` component.
# image: openmatch-query
swaggerui: &swaggerui
hostName: om-swaggerui
hostName:
httpPort: 51500
portType: ClusterIP
replicas: 1
image: openmatch-swaggerui
query: &query
hostName: om-query
hostName:
grpcPort: 50503
httpPort: 51503
portType: ClusterIP
replicas: 3
image: openmatch-query
frontend: &frontend
hostName: om-frontend
hostName:
grpcPort: 50504
httpPort: 51504
portType: ClusterIP
replicas: 3
image: openmatch-frontend
backend: &backend
hostName: om-backend
hostName:
grpcPort: 50505
httpPort: 51505
portType: ClusterIP
replicas: 3
image: openmatch-backend
synchronizer: &synchronizer
hostName: om-synchronizer
hostName:
grpcPort: 50506
httpPort: 51506
portType: ClusterIP
replicas: 1
image: openmatch-synchronizer
evaluator: &evaluator
hostName: om-evaluator
hostName:
grpcPort: 50508
httpPort: 51508
replicas: 3
function: &function
hostName: om-function
hostName:
grpcPort: 50502
httpPort: 51502
replicas: 3
@ -94,17 +94,18 @@ configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: om-configmap-default
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.default" . }}'
override:
volumeName: om-config-volume-override
mountPath: /app/config/override
configName: om-configmap-override
# This will be parsed through the `tpl` function.
configName: '{{ include "openmatch.configmap.override" . }}'
# Override Redis settings
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
redis:
fullnameOverride: om-redis
redisPort: 6379
usePassword: false
usePasswordFile: false
@ -128,7 +129,6 @@ redis:
slaveCount: 2
serviceAccount:
create: true
name: open-match-redis-service
sysctlImage:
# Enable this setting in production if you are running Open Match under Linux environment
enabled: false
@ -159,7 +159,7 @@ open-match-core:
enabled: true
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after a match function has started before it will be canceled,
@ -173,6 +173,8 @@ open-match-core:
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
# Duration for redis locks to expire.
backfillLockTimeout: 1m
redis:
enabled: true
@ -180,7 +182,7 @@ open-match-core:
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
user:
pool:
maxIdle: 200
maxActive: 0
@ -193,8 +195,6 @@ open-match-core:
open-match-scale:
# Switch the value between true/false to turn on/off this subchart
enabled: false
frontend: *frontend
backend: *backend
# Controls if users need to install the monitoring tools in Open Match.
open-match-telemetry:
@ -207,7 +207,6 @@ open-match-customize:
enabled: false
evaluator: *evaluator
function: *function
query: *query
# You can override the evaluator/mmf image
# evaluator:
# image: [YOUR_EVALUATOR_IMAGE]
@ -234,8 +233,8 @@ global:
limits:
memory: 100Mi
cpu: 100m
# Defines a service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount: open-match-unprivileged-service
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -257,10 +256,9 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 1.0.0
tag: 0.0.0-dev
pullPolicy: Always
# Expose the telemetry configurations to all subcharts because prometheus, for example,
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
@ -271,8 +269,8 @@ global:
enabled: true
jaeger:
enabled: false
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
prometheus:
enabled: false
endpoint: "/metrics"
@ -282,3 +280,5 @@ global:
prefix: "open_match"
grafana:
enabled: false
# This will be called with `tpl` in the open-match-telemetry subchart namespace.
prometheusServer: 'http://{{ include "call-nested" (list . "prometheus" "prometheus.server.fullname") }}.{{ .Release.Namespace }}.svc.cluster.local:80/'

View File

@ -0,0 +1,26 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package openmatch.internal;
option go_package = "open-match.dev/open-match/internal/ipb";
import "api/messages.proto";
message BackfillInternal {
// Represents a backfill entity which is used to fill partially full matches
openmatch.Backfill backfill = 1;
// List of ticket IDs associated with a current backfill
repeated string ticket_ids = 2;
}

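This internal message bundles a backfill with the ticket IDs currently attached to it, so the pair can round-trip through state storage as a single value. A minimal sketch using the protobuf runtime already imported elsewhere in this diff; the field values are illustrative:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"open-match.dev/open-match/internal/ipb"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	in := &ipb.BackfillInternal{
		Backfill:  &pb.Backfill{Id: "bf-1", Generation: 1},
		TicketIds: []string{"ticket-1", "ticket-2"},
	}
	raw, err := proto.Marshal(in) // the bytes a store could persist
	if err != nil {
		panic(err)
	}
	out := &ipb.BackfillInternal{}
	if err := proto.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Backfill.Id, out.TicketIds)
}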
View File

@ -26,10 +26,11 @@ import (
)
var (
totalBytesPerMatch = stats.Int64("open-match.dev/backend/total_bytes_per_match", "Total bytes per match", stats.UnitBytes)
ticketsPerMatch = stats.Int64("open-match.dev/backend/tickets_per_match", "Number of tickets per match", stats.UnitDimensionless)
ticketsReleased = stats.Int64("open-match.dev/backend/tickets_released", "Number of tickets released per request", stats.UnitDimensionless)
ticketsAssigned = stats.Int64("open-match.dev/backend/tickets_assigned", "Number of tickets assigned per request", stats.UnitDimensionless)
totalBytesPerMatch = stats.Int64("open-match.dev/backend/total_bytes_per_match", "Total bytes per match", stats.UnitBytes)
ticketsPerMatch = stats.Int64("open-match.dev/backend/tickets_per_match", "Number of tickets per match", stats.UnitDimensionless)
ticketsReleased = stats.Int64("open-match.dev/backend/tickets_released", "Number of tickets released per request", stats.UnitDimensionless)
ticketsAssigned = stats.Int64("open-match.dev/backend/tickets_assigned", "Number of tickets assigned per request", stats.UnitDimensionless)
ticketsTimeToAssignment = stats.Int64("open-match.dev/backend/ticket_time_to_assignment", "Time to assignment for tickets", stats.UnitMilliseconds)
totalMatchesView = &view.View{
Measure: totalBytesPerMatch,
@ -61,6 +62,13 @@ var (
Description: "Number of tickets released per request",
Aggregation: view.Sum(),
}
ticketsTimeToAssignmentView = &view.View{
Measure: ticketsTimeToAssignment,
Name: "open-match.dev/backend/ticket_time_to_assignment",
Description: "Time to assignment for tickets",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
)
// BindService creates the backend service and binds it to the serving harness.
@ -81,6 +89,7 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
ticketsPerMatchView,
ticketsAssignedView,
ticketsReleasedView,
ticketsTimeToAssignmentView,
)
return nil
}

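The new measure/view pair follows the usual OpenCensus pattern: record millisecond samples against the measure and let the registered view fold them into a distribution. A self-contained sketch of that pattern with names local to the example; the bucket bounds are illustrative, not Open Match's DefaultMillisecondsDistribution:

package main

import (
	"context"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var timeToAssignment = stats.Int64(
	"example.dev/ticket_time_to_assignment", "Time to assignment", stats.UnitMilliseconds)

func main() {
	v := &view.View{
		Measure:     timeToAssignment,
		Name:        "example.dev/ticket_time_to_assignment",
		Description: "Time to assignment for tickets",
		Aggregation: view.Distribution(50, 100, 250, 500, 1000),
	}
	if err := view.Register(v); err != nil {
		panic(err)
	}
	// Same arithmetic the backend uses: now minus the ticket's CreateTime.
	created := time.Now().Add(-300 * time.Millisecond)
	stats.Record(context.Background(), timeToAssignment.M(time.Since(created).Milliseconds()))
}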
View File

@ -22,12 +22,15 @@ import (
"net/http"
"strings"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/pkg/errors"
"github.com/rs/xid"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
@ -53,6 +56,7 @@ var (
"app": "openmatch",
"component": "app.backend",
})
errBackfillGenerationMismatch = errors.New("backfill generation mismatch")
)
// FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile
@ -87,7 +91,7 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
return synchronizeSend(ctx, syncStream, m, proposals)
})
eg.Go(func() error {
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs)
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs, s.store)
})
var mmfErr error
@ -102,13 +106,8 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
// TODO: Send mmf error in FetchSummary instead of erroring call.
if syncErr != nil || mmfErr != nil {
logger.WithFields(logrus.Fields{
"syncErr": syncErr,
"mmfErr": mmfErr,
}).Error("error(s) in FetchMatches call.")
return fmt.Errorf(
"error(s) in FetchMatches call. syncErr=[%s], mmfErr=[%s]",
"error(s) in FetchMatches call. syncErr=[%v], mmfErr=[%v]",
syncErr,
mmfErr,
)
@ -145,7 +144,7 @@ sendProposals:
return nil
}
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc) error {
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc, store statestore.Service) error {
var startMmfsOnce sync.Once
for {
@ -172,6 +171,31 @@ func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync
if !ok {
return fmt.Errorf("error casting sync map value into *pb.Match: %w", err)
}
backfill := match.GetBackfill()
if backfill != nil {
ticketIds := make([]string, 0, len(match.Tickets))
for _, t := range match.Tickets {
ticketIds = append(ticketIds, t.Id)
}
err = createOrUpdateBackfill(ctx, backfill, ticketIds, store)
if err != nil {
e, ok := status.FromError(err)
if err == errBackfillGenerationMismatch || (ok && e.Code() == codes.NotFound) {
err = doReleaseTickets(ctx, ticketIds, store)
if err != nil {
logger.WithError(err).Errorf("failed to remove match tickets from pending release: %v", ticketIds)
}
continue
}
return errors.Wrapf(err, "failed to handle match backfill: %s", match.MatchId)
}
}
stats.Record(ctx, totalBytesPerMatch.M(int64(proto.Size(match))))
stats.Record(ctx, ticketsPerMatch.M(int64(len(match.GetTickets()))))
err = stream.Send(&pb.FetchMatchesResponse{Match: match})
@ -201,17 +225,13 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
var conn *grpc.ClientConn
conn, err := cc.GetGRPC(address)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"function": address,
}).Error("failed to establish grpc client connection to match function")
return status.Error(codes.InvalidArgument, "failed to connect to match function")
return status.Error(codes.InvalidArgument, "failed to establish grpc client connection to match function")
}
client := pb.NewMatchFunctionClient(conn)
stream, err := client.Run(ctx, &pb.RunRequest{Profile: profile})
if err != nil {
logger.WithError(err).Error("failed to run match function for profile")
err = errors.Wrap(err, "failed to run match function for profile")
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
@ -225,7 +245,7 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
break
}
if err != nil {
logger.Errorf("%v.Run() error, %v\n", client, err)
err = errors.Wrapf(err, "%v.Run() error, %v", client, err)
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
@ -245,11 +265,8 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProfile, address string, proposals chan<- *pb.Match) error {
client, baseURL, err := cc.GetHTTP(address)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"function": address,
}).Error("failed to establish rest client connection to match function")
return status.Error(codes.InvalidArgument, "failed to connect to match function")
err = errors.Wrapf(err, "failed to establish rest client connection to match function: %s", address)
return status.Error(codes.InvalidArgument, err.Error())
}
var m jsonpb.Marshaler
@ -265,7 +282,7 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return status.Errorf(codes.Internal, "failed to get response from mmf run for proile %s: %s", profile.Name, err.Error())
return status.Errorf(codes.Internal, "failed to get response from mmf run for profile %s: %s", profile.Name, err.Error())
}
defer func() {
err = resp.Body.Close()
@ -306,16 +323,25 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
}
func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTicketsRequest) (*pb.ReleaseTicketsResponse, error) {
err := doReleasetickets(ctx, req, s.store)
err := doReleaseTickets(ctx, req.GetTicketIds(), s.store)
if err != nil {
logger.WithError(err).Error("failed to remove the awaiting tickets from the ignore list for requested tickets")
return nil, err
}
stats.Record(ctx, ticketsReleased.M(int64(len(req.TicketIds))))
return &pb.ReleaseTicketsResponse{}, nil
}
func doReleaseTickets(ctx context.Context, ticketIds []string, store statestore.Service) error {
err := store.DeleteTicketsFromPendingRelease(ctx, ticketIds)
if err != nil {
err = errors.Wrap(err, "failed to remove the awaiting tickets from the pending release for requested tickets")
return err
}
stats.Record(ctx, ticketsReleased.M(int64(len(ticketIds))))
return nil
}
func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseAllTicketsRequest) (*pb.ReleaseAllTicketsResponse, error) {
err := s.store.ReleaseAllTickets(ctx)
if err != nil {
@ -328,7 +354,6 @@ func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseA
func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
resp, err := doAssignTickets(ctx, req, s.store)
if err != nil {
logger.WithError(err).Error("failed to update assignments for requested tickets")
return nil, err
}
@ -341,13 +366,69 @@ func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicket
return resp, nil
}
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
resp, err := store.UpdateAssignments(ctx, req)
func createOrUpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIds []string, store statestore.Service) error {
if backfill.Id == "" {
backfill.Id = xid.New().String()
backfill.CreateTime = ptypes.TimestampNow()
backfill.Generation = 1
err := store.CreateBackfill(ctx, backfill, ticketIds)
if err != nil {
return err
}
return store.IndexBackfill(ctx, backfill)
}
m := store.NewMutex(backfill.Id)
err := m.Lock(ctx)
if err != nil {
return err
}
defer func() {
_, unlockErr := m.Unlock(ctx)
if unlockErr != nil {
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).WithError(unlockErr).Error("failed to unlock backfill mutex")
}
}()
b, ids, err := store.GetBackfill(ctx, backfill.Id)
if err != nil {
return err
}
if b.Generation != backfill.Generation {
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).
WithError(errBackfillGenerationMismatch).
Errorf("failed to update backfill, expecting: %d generation but got: %d", b.Generation, backfill.Generation)
return errBackfillGenerationMismatch
}
b.SearchFields = backfill.SearchFields
b.Extensions = backfill.Extensions
b.Generation++
err = store.UpdateBackfill(ctx, b, append(ids, ticketIds...))
if err != nil {
return err
}
return store.IndexBackfill(ctx, b)
}
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
resp, tickets, err := store.UpdateAssignments(ctx, req)
if err != nil {
logger.WithError(err).Error("failed to update assignments")
return nil, err
}
for _, ticket := range tickets {
err = recordTimeToAssignment(ctx, ticket)
if err != nil {
logger.WithError(err).Errorf("failed to record time to assignment for ticket %s", ticket.Id)
}
}
ids := []string{}
for _, ag := range req.Assignments {
@ -363,7 +444,7 @@ func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store st
}
}
if err = store.DeleteTicketsFromIgnoreList(ctx, ids); err != nil {
if err = store.DeleteTicketsFromPendingRelease(ctx, ids); err != nil {
logger.WithFields(logrus.Fields{
"ticket_ids": ids,
}).Error(err)
@ -372,14 +453,18 @@ func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store st
return resp, nil
}
func doReleasetickets(ctx context.Context, req *pb.ReleaseTicketsRequest, store statestore.Service) error {
err := store.DeleteTicketsFromIgnoreList(ctx, req.GetTicketIds())
func recordTimeToAssignment(ctx context.Context, ticket *pb.Ticket) error {
if ticket.Assignment == nil {
return fmt.Errorf("assignment for ticket %s is nil", ticket.Id)
}
now := time.Now()
created, err := ptypes.Timestamp(ticket.CreateTime)
if err != nil {
logger.WithFields(logrus.Fields{
"ticket_ids": req.GetTicketIds(),
}).WithError(err).Error("failed to delete the tickets from the ignore list")
return err
}
stats.Record(ctx, ticketsTimeToAssignment.M(now.Sub(created).Milliseconds()))
return nil
}

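The renamed doReleaseTickets helper is also what the public ReleaseTickets RPC funnels into, so a director can return tickets to the pool directly. A hedged client-side sketch; the address and ticket IDs are illustrative:

package main

import (
	"context"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	conn, err := grpc.Dial("om-backend.open-match.svc.cluster.local:50505", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	be := pb.NewBackendServiceClient(conn)
	// Return tickets to the matchmaking pool, e.g. after a proposal was rejected.
	_, err = be.ReleaseTickets(context.Background(), &pb.ReleaseTicketsRequest{
		TicketIds: []string{"ticket-1", "ticket-2"},
	})
	if err != nil {
		panic(err)
	}
}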
View File

@ -63,7 +63,7 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
// then returns matches which don't collide with previously returned matches.
func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
matches := make([]*matchInp, 0)
nilEvlautionInputs := 0
nilEvaluationInputs := 0
for m := range in {
// Evaluation criteria is optional, but sort it lower than any matches which
@ -82,7 +82,7 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
continue
}
} else {
nilEvlautionInputs++
nilEvaluationInputs++
}
matches = append(matches, &matchInp{
match: m,
@ -90,16 +90,17 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
})
}
if nilEvlautionInputs > 0 {
if nilEvaluationInputs > 0 {
logger.WithFields(logrus.Fields{
"count": nilEvlautionInputs,
"count": nilEvaluationInputs,
}).Info("Some matches don't have the optional field evaluation_input set.")
}
sort.Sort(byScore(matches))
d := decollider{
ticketsUsed: make(map[string]*collidingMatch),
ticketsUsed: make(map[string]*collidingMatch),
backfillsUsed: make(map[string]*collidingMatch),
}
for _, m := range matches {
@ -121,11 +122,25 @@ type collidingMatch struct {
}
type decollider struct {
resultIDs []string
ticketsUsed map[string]*collidingMatch
resultIDs []string
ticketsUsed map[string]*collidingMatch
backfillsUsed map[string]*collidingMatch
}
func (d *decollider) maybeAdd(m *matchInp) {
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
if cm, ok := d.backfillsUsed[m.match.Backfill.Id]; ok {
logger.WithFields(logrus.Fields{
"match_id": m.match.GetMatchId(),
"backfill_id": m.match.Backfill.Id,
"match_score": m.inp.GetScore(),
"colliding_match_id": cm.id,
"colliding_match_score": cm.score,
}).Info("Higher quality match with colliding backfill found. Rejecting match.")
return
}
}
for _, t := range m.match.GetTickets() {
if cm, ok := d.ticketsUsed[t.Id]; ok {
logger.WithFields(logrus.Fields{
@ -139,6 +154,13 @@ func (d *decollider) maybeAdd(m *matchInp) {
}
}
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
d.backfillsUsed[m.match.Backfill.Id] = &collidingMatch{
id: m.match.GetMatchId(),
score: m.inp.GetScore(),
}
}
for _, t := range m.match.GetTickets() {
d.ticketsUsed[t.Id] = &collidingMatch{
id: m.match.GetMatchId(),

View File

@ -21,7 +21,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"open-match.dev/open-match/pkg/pb"
)
@ -37,6 +37,9 @@ func TestEvaluate(t *testing.T) {
ticket1 := &pb.Ticket{Id: "1"}
ticket2 := &pb.Ticket{Id: "2"}
ticket3 := &pb.Ticket{Id: "3"}
backfill0 := &pb.Backfill{}
backfill1 := &pb.Backfill{Id: "1"}
backfill2 := &pb.Backfill{Id: "2"}
ticket12Score1 := &pb.Match{
MatchId: "ticket12Score1",
@ -78,6 +81,61 @@ func TestEvaluate(t *testing.T) {
},
}
ticket1Backfill0Score1 := &pb.Match{
MatchId: "ticket1Backfill0Score1",
Tickets: []*pb.Ticket{ticket1},
Backfill: backfill0,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket2Backfill0Score1 := &pb.Match{
MatchId: "ticket2Backfill0Score1",
Tickets: []*pb.Ticket{ticket2},
Backfill: backfill0,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket12Backfill1Score1 := &pb.Match{
MatchId: "ticket12Bacfill1Score1",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill1,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket12Backfill1Score10 := &pb.Match{
MatchId: "ticket12Bacfill1Score1",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill1,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 10,
}),
},
}
ticket12Backfill2Score5 := &pb.Match{
MatchId: "ticket12Backfill2Score5",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill2,
Extensions: map[string]*any.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 5,
}),
},
}
tests := []struct {
description string
testMatches []*pb.Match
@ -108,6 +166,16 @@ func TestEvaluate(t *testing.T) {
testMatches: []*pb.Match{ticket12Score1, ticket12Score10, ticket123Score5, ticket3Score50},
wantMatchIDs: []string{ticket12Score10.GetMatchId(), ticket3Score50.GetMatchId()},
},
{
description: "test evaluator ignores backfills with empty id",
testMatches: []*pb.Match{ticket1Backfill0Score1, ticket2Backfill0Score1},
wantMatchIDs: []string{ticket1Backfill0Score1.GetMatchId(), ticket2Backfill0Score1.GetMatchId()},
},
{
description: "test deduplicates matches by backfill and tickets and returns match with higher score",
testMatches: []*pb.Match{ticket12Backfill1Score1, ticket12Backfill1Score10, ticket12Backfill2Score5},
wantMatchIDs: []string{ticket12Backfill1Score10.GetMatchId()},
},
}
for _, test := range tests {
@ -122,17 +190,17 @@ func TestEvaluate(t *testing.T) {
close(in)
err := evaluate(context.Background(), in, out)
assert.Nil(t, err)
require.Nil(t, err)
gotMatchIDs := []string{}
close(out)
for id := range out {
gotMatchIDs = append(gotMatchIDs, id)
}
assert.Equal(t, len(test.wantMatchIDs), len(gotMatchIDs))
require.Equal(t, len(test.wantMatchIDs), len(gotMatchIDs))
for _, mID := range gotMatchIDs {
assert.Contains(t, test.wantMatchIDs, mID)
require.Contains(t, test.wantMatchIDs, mID)
}
})
}

View File

@ -19,19 +19,12 @@ import (
"context"
"io"
"github.com/sirupsen/logrus"
"github.com/pkg/errors"
"go.opencensus.io/stats"
"golang.org/x/sync/errgroup"
"open-match.dev/open-match/pkg/pb"
)
var (
logger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "evaluator.harness.golang",
})
)
// Evaluator is the function signature for the Evaluator to be implemented by
// the user. The harness will pass the Matches to evaluate to the Evaluator
// and the Evaluator will return an accepted list of Matches.
@ -95,8 +88,5 @@ func (s *evaluatorService) Evaluate(stream pb.Evaluator_EvaluateServer) error {
})
err := g.Wait()
if err != nil {
logger.WithError(err).Error("Error in evaluator.Evaluate")
}
return err
return errors.Wrap(err, "Error in evaluator.Evaluate")
}

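Judging by the evaluate signature earlier in this diff, the Evaluator contract this harness wraps takes proposed matches in and streams accepted match IDs out. A deliberately trivial evaluator under that assumed contract, approving every proposal:

package main

import (
	"context"

	"open-match.dev/open-match/pkg/pb"
)

// acceptAll approves every proposal by echoing its match ID.
// Real evaluators score and decollide proposals here instead.
func acceptAll(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
	for m := range in {
		out <- m.GetMatchId()
	}
	return nil
}

func main() {
	in := make(chan *pb.Match, 1)
	out := make(chan string, 1)
	in <- &pb.Match{MatchId: "m1"}
	close(in)
	if err := acceptAll(context.Background(), in, out); err != nil {
		panic(err)
	}
}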
View File

@ -25,8 +25,10 @@ import (
)
var (
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerBackfill = stats.Int64("open-match.dev/frontend/total_bytes_per_backfill", "Total bytes per backfill", stats.UnitBytes)
searchFieldsPerBackfill = stats.Int64("open-match.dev/frontend/searchfields_per_backfill", "Searchfields per backfill", stats.UnitDimensionless)
totalBytesPerTicketView = &view.View{
Measure: totalBytesPerTicket,
@ -40,6 +42,18 @@ var (
Description: "SearchFields per ticket",
Aggregation: telemetry.DefaultCountDistribution,
}
totalBytesPerBackfillView = &view.View{
Measure: totalBytesPerBackfill,
Name: "open-match.dev/frontend/total_bytes_per_backfill",
Description: "Total bytes per backfill",
Aggregation: telemetry.DefaultBytesDistribution,
}
searchFieldsPerBackfillView = &view.View{
Measure: searchFieldsPerBackfill,
Name: "open-match.dev/frontend/searchfields_per_backfill",
Description: "SearchFields per backfill",
Aggregation: telemetry.DefaultCountDistribution,
}
)
// BindService creates the frontend service and binds it to the serving harness.
@ -56,6 +70,8 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
b.RegisterViews(
totalBytesPerTicketView,
searchFieldsPerTicketView,
totalBytesPerBackfillView,
searchFieldsPerBackfillView,
)
return nil
}

View File

@ -83,25 +83,147 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
err := store.CreateTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"ticket": ticket,
}).Error("failed to create the ticket")
return nil, err
}
err = store.IndexTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"ticket": ticket,
}).Error("failed to index the ticket")
return nil, err
}
return ticket, nil
}
// CreateBackfill creates a new Backfill object.
// It assigns a unique ID to the input Backfill and records it in state storage.
// The initial acknowledgment timestamp is set at creation time.
// A Backfill is considered ready for matchmaking as soon as it is created.
// - If SearchFields exist in a Backfill, CreateBackfill will also index these fields so the backfill can be queried with the query.QueryBackfills function.
func (s *frontendService) CreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest) (*pb.Backfill, error) {
// Perform input validation.
if req == nil {
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
}
if req.Backfill == nil {
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
}
if req.Backfill.CreateTime != nil {
return nil, status.Errorf(codes.InvalidArgument, "backfills cannot be created with create time set")
}
return doCreateBackfill(ctx, req, s.store)
}
func doCreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest, store statestore.Service) (*pb.Backfill, error) {
// Generate an id and create a Backfill in state storage
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
if !ok {
return nil, status.Error(codes.Internal, "failed to clone input backfill proto")
}
backfill.Id = xid.New().String()
backfill.CreateTime = ptypes.TimestampNow()
backfill.Generation = 1
sfCount := 0
sfCount += len(backfill.GetSearchFields().GetDoubleArgs())
sfCount += len(backfill.GetSearchFields().GetStringArgs())
sfCount += len(backfill.GetSearchFields().GetTags())
stats.Record(ctx, searchFieldsPerBackfill.M(int64(sfCount)))
stats.Record(ctx, totalBytesPerBackfill.M(int64(proto.Size(backfill))))
err := store.CreateBackfill(ctx, backfill, []string{})
if err != nil {
return nil, err
}
err = store.IndexBackfill(ctx, backfill)
if err != nil {
return nil, err
}
return backfill, nil
}
// UpdateBackfill updates a Backfill object, if present.
// Each update increments the backfill's generation in Redis.
// Only Extensions and SearchFields are updated; CreateTime is never changed.
func (s *frontendService) UpdateBackfill(ctx context.Context, req *pb.UpdateBackfillRequest) (*pb.Backfill, error) {
if req == nil {
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
}
if req.Backfill == nil {
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
}
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
if !ok {
return nil, status.Error(codes.Internal, "failed to clone input backfill proto")
}
bfID := backfill.Id
if bfID == "" {
return nil, status.Error(codes.InvalidArgument, "backfill ID should exist")
}
m := s.store.NewMutex(bfID)
err := m.Lock(ctx)
if err != nil {
return nil, err
}
defer func() {
if _, err = m.Unlock(ctx); err != nil {
logger.WithError(err).Error("error on mutex unlock")
}
}()
bfStored, associatedTickets, err := s.store.GetBackfill(ctx, bfID)
if err != nil {
return nil, err
}
// The generation is updated here unconditionally, because the Frontend is called by the game server only.
bfStored.SearchFields = backfill.SearchFields
bfStored.Extensions = backfill.Extensions
// Autoincrement the generation; validation of the input backfill's
// generation is performed on the Backend only (after the MMF round)
bfStored.Generation++
err = s.store.UpdateBackfill(ctx, bfStored, []string{})
if err != nil {
return nil, err
}
err = s.store.DeleteTicketsFromPendingRelease(ctx, associatedTickets)
if err != nil {
return nil, err
}
err = s.store.IndexBackfill(ctx, bfStored)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": bfStored.Id,
}).Error("failed to index the backfill")
return nil, err
}
return bfStored, nil
}
// DeleteBackfill deletes a Backfill by its ID.
func (s *frontendService) DeleteBackfill(ctx context.Context, req *pb.DeleteBackfillRequest) (*empty.Empty, error) {
bfID := req.GetBackfillId()
if bfID == "" {
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
}
err := s.store.DeleteBackfillCompletely(ctx, bfID)
// Deletion of an expired Backfill is inevitable, so errors here are logged rather than returned
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
}).Error("error on DeleteBackfill")
}
return &empty.Empty{}, nil
}
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
@ -118,10 +240,6 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
// Deindex this Ticket to remove it from matchmaking pool.
err := store.DeindexTicket(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to deindex the ticket")
return err
}
@ -137,12 +255,12 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
"id": id,
}).Error("failed to delete the ticket")
}
err = store.DeleteTicketsFromIgnoreList(ctx, []string{id})
err = store.DeleteTicketsFromPendingRelease(ctx, []string{id})
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to delete the ticket from ignorelist")
}).Error("failed to delete the ticket from pendingRelease")
}
// TODO: If other redis queues are implemented or we have custom index fields
// created by Open Match, those need to be cleaned up here.
@ -152,20 +270,7 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
// GetTicket get the Ticket associated with the specified TicketId.
func (s *frontendService) GetTicket(ctx context.Context, req *pb.GetTicketRequest) (*pb.Ticket, error) {
return doGetTickets(ctx, req.GetTicketId(), s.store)
}
func doGetTickets(ctx context.Context, id string, store statestore.Service) (*pb.Ticket, error) {
ticket, err := store.GetTicket(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": id,
}).Error("failed to get the ticket")
return nil, err
}
return ticket, nil
return s.store.GetTicket(ctx, req.GetTicketId())
}
// WatchAssignments stream back Assignment of the specified TicketId if it is updated.
@ -197,7 +302,6 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
err := sender(currAssignment)
if err != nil {
logger.WithError(err).Error("failed to send Redis response to grpc server")
return status.Errorf(codes.Aborted, err.Error())
}
}
@ -206,3 +310,71 @@ func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignme
return store.GetAssignments(ctx, id, callback)
}
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info.
// This triggers an assignment process.
func (s *frontendService) AcknowledgeBackfill(ctx context.Context, req *pb.AcknowledgeBackfillRequest) (*pb.Backfill, error) {
if req.GetBackfillId() == "" {
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
}
if req.GetAssignment() == nil {
return nil, status.Errorf(codes.InvalidArgument, ".Assignment is required")
}
m := s.store.NewMutex(req.GetBackfillId())
err := m.Lock(ctx)
if err != nil {
return nil, err
}
defer func() {
if _, err = m.Unlock(ctx); err != nil {
logger.WithError(err).Error("error on mutex unlock")
}
}()
bf, associatedTickets, err := s.store.GetBackfill(ctx, req.GetBackfillId())
if err != nil {
return nil, err
}
err = s.store.UpdateAcknowledgmentTimestamp(ctx, req.GetBackfillId())
if err != nil {
return nil, err
}
if len(associatedTickets) != 0 {
resp, _, err := s.store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{{TicketIds: associatedTickets, Assignment: req.GetAssignment()}},
})
if err != nil {
return nil, err
}
// log errors returned from UpdateAssignments to track tickets with NotFound errors
for _, f := range resp.Failures {
logger.Errorf("failed to assign ticket %s, cause %d", f.TicketId, f.Cause)
}
for _, id := range associatedTickets {
err = s.store.DeindexTicket(ctx, id)
// Try to deindex all input tickets. Log without returning an error if the deindexing operation failed.
if err != nil {
logger.WithError(err).Errorf("failed to deindex ticket %s after updating the assignments", id)
}
}
// Clear all tickets associated with the backfill; any tickets that failed assignment above did so only because they were not found, so no association needs to be kept
err = s.store.UpdateBackfill(ctx, bf, []string{})
if err != nil {
return nil, err
}
}
return bf, nil
}
// GetBackfill fetches a Backfill object by its ID.
func (s *frontendService) GetBackfill(ctx context.Context, req *pb.GetBackfillRequest) (*pb.Backfill, error) {
bf, _, err := s.store.GetBackfill(ctx, req.GetBackfillId())
return bf, err
}

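Taken together, these RPCs give a game server a simple backfill lifecycle: create, heartbeat via acknowledgment, delete. A hedged client sketch of that flow; the address, search fields, and connection string are illustrative:

package main

import (
	"context"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	conn, err := grpc.Dial("om-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fe := pb.NewFrontendServiceClient(conn)
	ctx := context.Background()

	// 1. Advertise open slots on a running match.
	bf, err := fe.CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
		SearchFields: &pb.SearchFields{StringArgs: map[string]string{"mode": "ranked"}},
	}})
	if err != nil {
		panic(err)
	}
	// 2. Heartbeat: refreshes the acknowledgment timestamp and assigns any
	// tickets the MMF attached since the last call.
	if _, err = fe.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{
		BackfillId: bf.Id,
		Assignment: &pb.Assignment{Connection: "10.0.0.1:7777"},
	}); err != nil {
		panic(err)
	}
	// 3. The match filled up (or the server is draining): stop backfilling.
	if _, err = fe.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: bf.Id}); err != nil {
		panic(err)
	}
}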
View File

@ -22,8 +22,9 @@ import (
"testing"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/statestore"
@ -77,17 +78,194 @@ func TestDoCreateTickets(t *testing.T) {
test.preAction(cancel)
res, err := doCreateTicket(ctx, &pb.CreateTicketRequest{Ticket: test.ticket}, store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
if err == nil {
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetId())
assert.True(t, matched)
assert.Nil(t, err)
assert.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
require.True(t, matched)
require.NoError(t, err)
require.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
}
})
}
}
func TestCreateBackfill(t *testing.T) {
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
ctx := utilTesting.NewContext(t)
fs := frontendService{cfg, store}
var testCases = []struct {
description string
request *pb.CreateBackfillRequest
result *pb.Backfill
expectedCode codes.Code
expectedMessage string
}{
{
description: "nil request check",
request: nil,
expectedCode: codes.InvalidArgument,
expectedMessage: "request is nil",
},
{
description: "nil backfill - error is returned",
request: &pb.CreateBackfillRequest{Backfill: nil},
expectedCode: codes.InvalidArgument,
expectedMessage: ".backfill is required",
},
{
description: "createTime should not exist in input",
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{CreateTime: ptypes.TimestampNow()}},
expectedCode: codes.InvalidArgument,
expectedMessage: "backfills cannot be created with create time set",
},
{
description: "empty Backfill, no errors",
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{}},
expectedCode: codes.OK,
expectedMessage: "",
},
{
description: "normal backfill",
request: &pb.CreateBackfillRequest{
Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
}}}},
expectedCode: codes.OK,
expectedMessage: "",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
res, err := fs.CreateBackfill(ctx, tc.request)
if tc.expectedCode == codes.OK {
require.NoError(t, err)
require.NotNil(t, res)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
}
})
}
// expect error with canceled context
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fs = frontendService{cfg, store}
ctx, cancel := context.WithCancel(context.Background())
cancel()
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}})
require.NotNil(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Nil(t, res)
}
func TestUpdateBackfill(t *testing.T) {
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
ctx := utilTesting.NewContext(t)
fs := frontendService{cfg, store}
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{
Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
},
},
},
})
require.NoError(t, err)
require.NotNil(t, res)
var testCases = []struct {
description string
request *pb.UpdateBackfillRequest
result *pb.Backfill
expectedCode codes.Code
expectedMessage string
}{
{
description: "nil request check",
request: nil,
expectedCode: codes.InvalidArgument,
expectedMessage: "request is nil",
},
{
description: "nil backfill - error is returned",
request: &pb.UpdateBackfillRequest{Backfill: nil},
expectedCode: codes.InvalidArgument,
expectedMessage: ".backfill is required",
},
{
description: "empty Backfill, error with no backfill ID",
request: &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{}},
expectedCode: codes.InvalidArgument,
expectedMessage: "backfill ID should exist",
},
{
description: "normal backfill",
request: &pb.UpdateBackfillRequest{
Backfill: &pb.Backfill{
Id: res.Id,
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
}}}},
expectedCode: codes.OK,
expectedMessage: "",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
res, err = fs.UpdateBackfill(ctx, tc.request)
if tc.expectedCode == codes.OK {
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, tc.request.Backfill.SearchFields.DoubleArgs, res.SearchFields.DoubleArgs)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
}
})
}
// expect error with canceled context
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
fs = frontendService{cfg, store}
defer closer()
ctx, cancel := context.WithCancel(context.Background())
cancel()
res, err = fs.UpdateBackfill(ctx, &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{
Id: res.Id,
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}})
require.NotNil(t, err)
require.Equal(t, codes.Unknown.String(), status.Convert(err).Code().String())
require.Nil(t, res)
}
func TestDoWatchAssignments(t *testing.T) {
testTicket := &pb.Ticket{
Id: "test-id",
@ -118,12 +296,12 @@ func TestDoWatchAssignments(t *testing.T) {
{
description: "expect two assignment reads from preAction writes and fail in grpc aborted code",
preAction: func(ctx context.Context, t *testing.T, store statestore.Service, wantAssignments []*pb.Assignment, wg *sync.WaitGroup) {
assert.Nil(t, store.CreateTicket(ctx, testTicket))
require.Nil(t, store.CreateTicket(ctx, testTicket))
go func(wg *sync.WaitGroup) {
for i := 0; i < len(wantAssignments); i++ {
time.Sleep(50 * time.Millisecond)
_, err := store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
_, _, err := store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{testTicket.GetId()},
@ -131,7 +309,7 @@ func TestDoWatchAssignments(t *testing.T) {
},
},
})
assert.Nil(t, err)
require.NoError(t, err)
wg.Done()
}
}(wg)
@ -155,16 +333,97 @@ func TestDoWatchAssignments(t *testing.T) {
test.preAction(ctx, t, store, test.wantAssignments, &wg)
err := doWatchAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
wg.Wait()
for i := 0; i < len(gotAssignments); i++ {
assert.Equal(t, gotAssignments[i], test.wantAssignments[i])
require.Equal(t, gotAssignments[i], test.wantAssignments[i])
}
})
}
}
// TestAcknowledgeBackfillValidation - test input validation only
func TestAcknowledgeBackfillValidation(t *testing.T) {
cfg := viper.New()
tests := []struct {
description string
request *pb.AcknowledgeBackfillRequest
expectedMessage string
}{
{
description: "no BackfillId, error is expected",
request: &pb.AcknowledgeBackfillRequest{BackfillId: "", Assignment: &pb.Assignment{Connection: "10.0.0.1"}},
expectedMessage: ".BackfillId is required",
},
{
description: "no Assignment, error is expected",
request: &pb.AcknowledgeBackfillRequest{BackfillId: "1234", Assignment: nil},
expectedMessage: ".Assignment is required",
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fs := frontendService{cfg, store}
bf, err := fs.AcknowledgeBackfill(ctx, test.request)
require.Equal(t, codes.InvalidArgument.String(), status.Convert(err).Code().String())
require.Equal(t, test.expectedMessage, status.Convert(err).Message())
require.Nil(t, bf)
})
}
}
// TestAcknowledgeBackfill verifies timestamp part of AcknowledgeBackfill call,
// assignment part tested in a corresponding E2E test.
// Expired backfill can not be acknowledged
func TestAcknowledgeBackfill(t *testing.T) {
cfg := viper.New()
ctx := context.Background()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
err := store.CreateBackfill(ctx, fakeBackfill, []string{})
require.NoError(t, err)
fs := frontendService{cfg, store}
bf, err := fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.NoError(t, err)
require.NotNil(t, bf)
// Use wrong BackfillID, error is returned
bf, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: "42", Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.Error(t, err)
require.Nil(t, bf)
require.Equal(t, "Backfill id: 42 not found", status.Convert(err).Message())
time.Sleep(cfg.GetDuration("pendingReleaseTimeout"))
ids, err := store.GetExpiredBackfillIDs(ctx)
require.NoError(t, err)
require.Len(t, ids, 1)
bf, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.Nil(t, bf)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "can not acknowledge an expired backfill, id: 1")
}
func TestDoDeleteTicket(t *testing.T) {
fakeTicket := &pb.Ticket{
Id: "1",
@ -211,7 +470,7 @@ func TestDoDeleteTicket(t *testing.T) {
test.preAction(ctx, cancel, store)
err := doDeleteTicket(ctx, fakeTicket.GetId(), store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
})
}
}
@ -264,13 +523,119 @@ func TestDoGetTicket(t *testing.T) {
test.preAction(ctx, cancel, store)
ticket, err := doGetTickets(ctx, fakeTicket.GetId(), store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
ticket, err := store.GetTicket(ctx, fakeTicket.GetId())
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
if err == nil {
assert.Equal(t, test.wantTicket.GetId(), ticket.GetId())
assert.Equal(t, test.wantTicket.SearchFields.DoubleArgs, ticket.SearchFields.DoubleArgs)
require.Equal(t, test.wantTicket.GetId(), ticket.GetId())
require.Equal(t, test.wantTicket.SearchFields.DoubleArgs, ticket.SearchFields.DoubleArgs)
}
})
}
}
func TestGetBackfill(t *testing.T) {
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
cfg := viper.New()
tests := []struct {
description string
preAction func(context.Context, context.CancelFunc, statestore.Service)
wantTicket *pb.Backfill
wantCode codes.Code
}{
{
description: "expect unavailable code since context is canceled before being called",
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
cancel()
},
wantCode: codes.Unavailable,
},
{
description: "expect not found code since ticket does not exist",
preAction: func(_ context.Context, _ context.CancelFunc, _ statestore.Service) {},
wantCode: codes.NotFound,
},
{
description: "expect ok code with output ticket equivalent to fakeBackfill",
preAction: func(ctx context.Context, _ context.CancelFunc, store statestore.Service) {
store.CreateBackfill(ctx, fakeBackfill, []string{})
},
wantCode: codes.OK,
wantBackfill: fakeBackfill,
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
defer closer()
fs := frontendService{cfg, store}
test.preAction(ctx, cancel, store)
backfill, err := fs.GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: fakeBackfill.GetId()})
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
if err == nil {
require.Equal(t, test.wantBackfill.GetId(), backfill.GetId())
require.Equal(t, test.wantBackfill.SearchFields.DoubleArgs, backfill.SearchFields.DoubleArgs)
}
})
}
}
func TestDoDeleteBackfill(t *testing.T) {
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
defer closer()
ctx := context.Background()
err := store.CreateBackfill(ctx, fakeBackfill, []string{})
require.NoError(t, err)
cfg := viper.New()
fs := frontendService{cfg, store}
tests := []struct {
description string
id string
wantCode codes.Code
}{
{
description: "expect ok code since delete backfill does not care about if backfill exists or not",
id: "222",
wantCode: codes.OK,
},
{
description: "expect ok code",
id: "1",
wantCode: codes.OK,
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
_, err := fs.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: test.id})
require.NoError(t, err)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
})
}
}

internal/app/query/cache.go Normal file

@ -0,0 +1,250 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
// cache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type cache struct {
store statestore.Service
requests chan *cacheRequest
// Single item buffered channel. Holds a value when runQuery can be safely
// started. Basically a channel/select friendly mutex around runQuery
// running.
startRunRequest chan struct{}
wg sync.WaitGroup
// Fields below are not safe for concurrent use: they are written only by
// update, and read by request callbacks once given the ok.
value interface{}
update func(statestore.Service, interface{}) error
err error
}
type cacheRequest struct {
ctx context.Context
runNow chan struct{}
}
func (c *cache) request(ctx context.Context, f func(interface{})) error {
cr := &cacheRequest{
ctx: ctx,
runNow: make(chan struct{}),
}
sendRequest:
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "cache request canceled before request sent.")
case <-c.startRunRequest:
go c.runRequest()
case c.requests <- cr:
break sendRequest
}
}
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "cache request canceled waiting for access.")
case <-cr.runNow:
defer c.wg.Done()
}
if c.err != nil {
return c.err
}
f(c.value)
return nil
}
func (c *cache) runRequest() {
defer func() {
c.startRunRequest <- struct{}{}
}()
// Wait for first query request.
reqs := []*cacheRequest{<-c.requests}
// Collect all waiting queries.
collectAllWaiting:
for {
select {
case req := <-c.requests:
reqs = append(reqs, req)
default:
break collectAllWaiting
}
}
c.err = c.update(c.store, c.value)
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
// Send WaitGroup to query calls, letting them run their query on the cache.
for _, req := range reqs {
c.wg.Add(1)
select {
case req.runNow <- struct{}{}:
case <-req.ctx.Done():
c.wg.Done()
}
}
// wait for requests to finish using cache.
c.wg.Wait()
}
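// Illustrative sketch (not part of this change): how a caller typically
// drives the cache. The callback runs while runRequest holds the readers
// via the WaitGroup, so it should copy out what it needs and must not keep
// a reference to the map after returning. countTickets is a hypothetical
// helper, shown only for illustration.
func countTickets(ctx context.Context, c *cache) (int, error) {
	n := 0
	err := c.request(ctx, func(value interface{}) {
		// For a ticket cache, value is the map[string]*pb.Ticket built by update.
		if tickets, ok := value.(map[string]*pb.Ticket); ok {
			n = len(tickets)
		}
	})
	return n, err
}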
func newTicketCache(b *appmain.Bindings, store statestore.Service) *cache {
c := &cache{
store: store,
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
value: make(map[string]*pb.Ticket),
update: updateTicketCache,
}
c.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(c.store.HealthCheck)
return c
}
func updateTicketCache(store statestore.Service, value interface{}) error {
if value == nil {
return status.Error(codes.InvalidArgument, "value is required")
}
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Ticket, but got: %T", value)
}
t := time.Now()
previousCount := len(tickets)
currentAll, err := store.GetIndexedIDSet(context.Background())
if err != nil {
return err
}
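// Evict cached tickets that are no longer in the indexed ID set.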
deletedCount := 0
for id := range tickets {
if _, ok := currentAll[id]; !ok {
delete(tickets, id)
deletedCount++
}
}
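// Fetch tickets that are indexed but not yet cached, in a single call.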
toFetch := []string{}
for id := range currentAll {
if _, ok := tickets[id]; !ok {
toFetch = append(toFetch, id)
}
}
newTickets, err := store.GetTickets(context.Background(), toFetch)
if err != nil {
return err
}
for _, t := range newTickets {
tickets[t.Id] = t
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tickets))
return nil
}
func newBackfillCache(b *appmain.Bindings, store statestore.Service) *cache {
c := &cache{
store: store,
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
value: make(map[string]*pb.Backfill),
update: updateBackfillCache,
}
c.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(c.store.HealthCheck)
return c
}
func updateBackfillCache(store statestore.Service, value interface{}) error {
if value == nil {
return status.Error(codes.InvalidArgument, "value is required")
}
backfills, ok := value.(map[string]*pb.Backfill)
if !ok {
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Backfill, but got: %T", value)
}
t := time.Now()
previousCount := len(backfills)
index, err := store.GetIndexedBackfills(context.Background())
if err != nil {
return err
}
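// Evict backfills that are gone from the index or whose cached copy is
// older than the indexed generation.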
deletedCount := 0
for id, backfill := range backfills {
generation, ok := index[id]
if !ok || backfill.Generation < int64(generation) {
delete(backfills, id)
deletedCount++
}
}
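// Re-fetch anything indexed but missing from the cache, including entries
// just evicted because a newer generation exists.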
toFetch := []string{}
for id := range index {
if _, ok := backfills[id]; !ok {
toFetch = append(toFetch, id)
}
}
fetchedBackfills, err := store.GetBackfills(context.Background(), toFetch)
if err != nil {
return err
}
for _, b := range fetchedBackfills {
backfills[b.Id] = b
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
logger.Debugf("Backfill Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(backfills))
return nil
}


@ -19,13 +19,15 @@ import (
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of tickets query service cached", stats.UnitDimensionless)
backfillsPerQuery = stats.Int64("open-match.dev/query/backfills_per_query", "Number of backfills per query", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of items query service cached", stats.UnitDimensionless)
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
@ -36,10 +38,16 @@ var (
Description: "Tickets per query",
Aggregation: telemetry.DefaultCountDistribution,
}
backfillsPerQueryView = &view.View{
Measure: ticketsPerQuery,
Name: "open-match.dev/query/backfills_per_query",
Description: "Backfills per query",
Aggregation: telemetry.DefaultCountDistribution,
}
cacheTotalItemsView = &view.View{
Measure: cacheTotalItems,
Name: "open-match.dev/query/total_cached_items",
Description: "Total number of cached tickets",
Description: "Total number of cached items",
Aggregation: view.LastValue(),
}
cacheFetchedItemsView = &view.View{
@ -70,9 +78,11 @@ var (
// BindService creates the query service and binds it to the serving harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
store := statestore.New(p.Config())
service := &queryService{
cfg: p.Config(),
tc: newTicketCache(b, p.Config()),
tc: newTicketCache(b, store),
bc: newBackfillCache(b, store),
}
b.AddHandleFunc(func(s *grpc.Server) {
@ -80,6 +90,7 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
}, pb.RegisterQueryServiceHandlerFromEndpoint)
b.RegisterViews(
ticketsPerQueryView,
backfillsPerQueryView,
cacheTotalItemsView,
cacheUpdateView,
cacheFetchedItemsView,


@ -15,20 +15,14 @@
package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/filter"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
@ -40,10 +34,11 @@ var (
)
// queryService API provides utility functions for common MMF functionality such
// as retreiving Tickets from state storage.
// as retrieving Tickets from state storage.
type queryService struct {
cfg config.View
tc *ticketCache
tc *cache
bc *cache
}
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
@ -59,7 +54,13 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
}
var results []*pb.Ticket
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
err = s.tc.request(ctx, func(value interface{}) {
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
return
}
for _, ticket := range tickets {
if pf.In(ticket) {
results = append(results, ticket)
@ -67,7 +68,7 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
}
})
if err != nil {
logger.WithError(err).Error("Failed to run request.")
err = errors.Wrap(err, "QueryTickets: failed to run request")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
@ -103,7 +104,13 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
}
var results []string
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
err = s.tc.request(ctx, func(value interface{}) {
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
return
}
for id, ticket := range tickets {
if pf.In(ticket) {
results = append(results, id)
@ -111,7 +118,7 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
}
})
if err != nil {
logger.WithError(err).Error("Failed to run request.")
err = errors.Wrap(err, "QueryTicketIds: failed to run request")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
@ -134,6 +141,56 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
return nil
}
func (s *queryService) QueryBackfills(req *pb.QueryBackfillsRequest, responseServer pb.QueryService_QueryBackfillsServer) error {
ctx := responseServer.Context()
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
}
pf, err := filter.NewPoolFilter(pool)
if err != nil {
return err
}
var results []*pb.Backfill
err = s.bc.request(ctx, func(value interface{}) {
backfills, ok := value.(map[string]*pb.Backfill)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Backfill, but got: %T", value)
return
}
for _, backfill := range backfills {
if pf.In(backfill) {
results = append(results, backfill)
}
}
})
if err != nil {
err = errors.Wrap(err, "QueryBackfills: failed to run request")
return err
}
stats.Record(ctx, backfillsPerQuery.M(int64(len(results))))
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
end := start + pSize
if end > len(results) {
end = len(results)
}
err := responseServer.Send(&pb.QueryBackfillsResponse{
Backfills: results[start:end],
})
if err != nil {
return err
}
}
return nil
}
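// Illustrative sketch (an assumption, not part of this change): draining
// the QueryBackfills stream from a client. It assumes an "io" import and a
// connected pb.QueryServiceClient; responses arrive in pages of at most
// queryPageSize backfills each.
func fetchAllBackfills(ctx context.Context, qc pb.QueryServiceClient, pool *pb.Pool) ([]*pb.Backfill, error) {
	stream, err := qc.QueryBackfills(ctx, &pb.QueryBackfillsRequest{Pool: pool})
	if err != nil {
		return nil, err
	}
	var backfills []*pb.Backfill
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			// Server closed the stream: all pages received.
			return backfills, nil
		}
		if err != nil {
			return nil, err
		}
		backfills = append(backfills, resp.Backfills...)
	}
}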
func getPageSize(cfg config.View) int {
const (
name = "queryPageSize"
@ -165,159 +222,3 @@ func getPageSize(cfg config.View) int {
return pSize
}
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
// ticketCache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type ticketCache struct {
store statestore.Service
requests chan *cacheRequest
// Single item buffered channel. Holds a value when runQuery can be safely
// started. Basically a channel/select friendly mutex around runQuery
// running.
startRunRequest chan struct{}
wg sync.WaitGroup
// Multithreaded unsafe fields, only to be written by update, and read when
// request given the ok.
tickets map[string]*pb.Ticket
err error
}
func newTicketCache(b *appmain.Bindings, cfg config.View) *ticketCache {
tc := &ticketCache{
store: statestore.New(cfg),
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
tickets: make(map[string]*pb.Ticket),
}
tc.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(tc.store.HealthCheck)
return tc
}
type cacheRequest struct {
ctx context.Context
runNow chan struct{}
}
func (tc *ticketCache) request(ctx context.Context, f func(map[string]*pb.Ticket)) error {
cr := &cacheRequest{
ctx: ctx,
runNow: make(chan struct{}),
}
sendRequest:
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled before reuest sent.")
case <-tc.startRunRequest:
go tc.runRequest()
case tc.requests <- cr:
break sendRequest
}
}
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled waiting for access.")
case <-cr.runNow:
defer tc.wg.Done()
}
if tc.err != nil {
return tc.err
}
f(tc.tickets)
return nil
}
func (tc *ticketCache) runRequest() {
defer func() {
tc.startRunRequest <- struct{}{}
}()
// Wait for first query request.
reqs := []*cacheRequest{<-tc.requests}
// Collect all waiting queries.
collectAllWaiting:
for {
select {
case req := <-tc.requests:
reqs = append(reqs, req)
default:
break collectAllWaiting
}
}
tc.update()
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
// Send WaitGroup to query calls, letting them run their query on the ticket
// cache.
for _, req := range reqs {
tc.wg.Add(1)
select {
case req.runNow <- struct{}{}:
case <-req.ctx.Done():
tc.wg.Done()
}
}
// wait for requests to finish using ticket cache.
tc.wg.Wait()
}
func (tc *ticketCache) update() {
st := time.Now()
previousCount := len(tc.tickets)
currentAll, err := tc.store.GetIndexedIDSet(context.Background())
if err != nil {
tc.err = err
return
}
deletedCount := 0
for id := range tc.tickets {
if _, ok := currentAll[id]; !ok {
delete(tc.tickets, id)
deletedCount++
}
}
toFetch := []string{}
for id := range currentAll {
if _, ok := tc.tickets[id]; !ok {
toFetch = append(toFetch, id)
}
}
newTickets, err := tc.store.GetTickets(context.Background(), toFetch)
if err != nil {
tc.err = err
return
}
for _, t := range newTickets {
tc.tickets[t.Id] = t
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(st))/float64(time.Millisecond)))
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
tc.err = nil
}


@ -18,6 +18,7 @@ import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"open-match.dev/open-match/internal/config"
)
@ -61,9 +62,7 @@ func TestGetPageSize(t *testing.T) {
cfg := viper.New()
tt.configure(cfg)
actual := getPageSize(cfg)
if actual != tt.expected {
t.Errorf("got %d, want %d", actual, tt.expected)
}
require.Equal(t, tt.expected, actual)
})
}
}


@ -43,7 +43,7 @@ var (
// Streams from multiple GRPC calls of matches are combined on a single channel.
// These matches are sent to the evaluator, then the tickets are added to the
// ignore list. Finally the matches are returned to the calling stream.
// pending release list. Finally the matches are returned to the calling stream.
// receive from backend | Synchronize
// -> m1c ->
@ -51,11 +51,11 @@ var (
// -> m2c ->
// remember return channel m7c for match | fanInFanOut
// -> m3c ->
// setmappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
// set mappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
// -> m4c -> (buffered)
// send to evaluator | wrapEvaluator
// -> m5c -> (buffered)
// add tickets to ignore list | addMatchesToIgnoreList
// add tickets to pending release | addMatchesToPendingRelease
// -> m6c ->
// fan out to origin synchronize call | fanInFanOut
// -> (Synchronize call specific ) m7c -> (buffered)
@ -113,7 +113,7 @@ func (s *synchronizerService) Synchronize(stream ipb.Synchronizer_SynchronizeSer
registration.allM1cSent.Done()
return
}
registration.m1c.send(mAndM6c{m: req.Proposal, m7c: registration.m7c})
registration.m1c.send(mAndM7c{m: req.Proposal, m7c: registration.m7c})
}
}()
@ -212,7 +212,7 @@ func (s *synchronizerService) runCycle() {
/////////////////////////////////////// Initialize cycle
ctx, cancel := contextcause.WithCancelCause(context.Background())
m2c := make(chan mAndM6c)
m2c := make(chan mAndM7c)
m3c := make(chan *pb.Match)
m4c := make(chan *pb.Match)
m5c := make(chan string)
@ -240,8 +240,8 @@ func (s *synchronizerService) runCycle() {
go s.cacheMatchIDToTicketIDs(matchTickets, m3c, m4c)
go s.wrapEvaluator(ctx, cancel, bufferMatchChannel(m4c), m5c)
go func() {
s.addMatchesToIgnoreList(ctx, matchTickets, cancel, bufferStringChannel(m5c), m6c)
// Wait for ignore list, but not all matches returned, the next cycle
s.addMatchesToPendingRelease(ctx, matchTickets, cancel, bufferStringChannel(m5c), m6c)
// Wait for pending release, but not all matches returned, the next cycle
// can start now.
close(closedOnCycleEnd)
}()
@ -289,17 +289,24 @@ Registration:
r.cancelMmfs <- struct{}{}
}
})
<-closedOnCycleEnd
stats.Record(ctx, iterationLatency.M(float64(time.Since(cst)/time.Millisecond)))
// Clean up in case it was never needed.
cancelProposalCollection.Stop()
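// Expired backfills are cleaned up once at the end of each synchronizer cycle.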
err := s.store.CleanupBackfills(ctx)
if err != nil {
logger.Errorf("Failed to clean up backfills, %s", err.Error())
}
}
///////////////////////////////////////
///////////////////////////////////////
type mAndM6c struct {
type mAndM7c struct {
m *pb.Match
m7c chan string
}
@ -309,10 +316,10 @@ type mAndM6c struct {
// This channel is remembered in a map, and the match is passed to be evaluated.
// When a match returns from evaluation, it's ID is looked up in the map and the
// match is returned on that channel.
func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
m6cMap := make(map[string]chan<- string)
func fanInFanOut(m2c <-chan mAndM7c, m3c chan<- *pb.Match, m6c <-chan string) {
m7cMap := make(map[string]chan<- string)
defer func(m2c <-chan mAndM6c) {
defer func(m2c <-chan mAndM7c) {
for range m2c {
}
}(m2c)
@ -321,7 +328,7 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
select {
case m2, ok := <-m2c:
if ok {
m6cMap[m2.m.GetMatchId()] = m2.m7c
m7cMap[m2.m.GetMatchId()] = m2.m7c
m3c <- m2.m
} else {
close(m3c)
@ -334,7 +341,7 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
return
}
m7c, ok := m6cMap[m5]
m7c, ok := m7cMap[m5]
if ok {
m7c <- m5
} else {
@ -350,8 +357,8 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
///////////////////////////////////////
type cutoffSender struct {
m1c chan<- mAndM6c
m2c chan<- mAndM6c
m1c chan<- mAndM7c
m2c chan<- mAndM7c
closed chan struct{}
closeOnce sync.Once
}
@ -359,8 +366,8 @@ type cutoffSender struct {
// cutoffSender allows values to be passed on the provided channel until cutoff
// has been called. This closes the provided channel. Calls to send after
// cutoff work, but values are ignored.
func newCutoffSender(m2c chan<- mAndM6c) *cutoffSender {
m1c := make(chan mAndM6c)
func newCutoffSender(m2c chan<- mAndM7c) *cutoffSender {
m1c := make(chan mAndM7c)
c := &cutoffSender{
m1c: m1c,
m2c: m2c,
@ -383,7 +390,7 @@ func newCutoffSender(m2c chan<- mAndM6c) *cutoffSender {
}
// send passes the value on the channel if still open, otherwise does nothing.
func (c *cutoffSender) send(match mAndM6c) {
func (c *cutoffSender) send(match mAndM7c) {
select {
case <-c.closed:
case c.m1c <- match:
@ -435,10 +442,10 @@ func getTicketIds(tickets []*pb.Ticket) []string {
///////////////////////////////////////
// Calls statestore to add all of the tickets returned by the evaluator to the
// ignorelist. If it partially fails for whatever reason (not all tickets will
// nessisarily be in the same call), only the matches which can be safely
// pendingRelease list. If it partially fails for whatever reason (not all tickets will
// necessarily be in the same call), only the matches which can be safely
// returned to the Synchronize calls are.
func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
totalMatches := 0
successfulMatches := 0
var lastErr error
@ -453,7 +460,7 @@ func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *syn
}
}
err := s.store.AddTicketsToIgnoreList(ctx, ids)
err := s.store.AddTicketsToPendingRelease(ctx, ids)
totalMatches += len(mIDs)
if err == nil {
@ -472,10 +479,10 @@ func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *syn
"error": lastErr.Error(),
"totalMatches": totalMatches,
"successfulMatches": successfulMatches,
}).Error("some or all matches were not successfully added to the ignore list, failed matches dropped")
}).Error("some or all matches were not successfully added to the pending release, failed matches dropped")
if successfulMatches == 0 {
cancel(fmt.Errorf("no matches successfully added to the ignore list. Last error: %w", lastErr))
cancel(fmt.Errorf("no matches successfully added to the pending release. Last error: %w", lastErr))
}
}
close(m6c)


@ -21,6 +21,7 @@ import (
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -72,16 +73,23 @@ func NewPoolFilter(pool *pb.Pool) (*PoolFilter, error) {
}, nil
}
type filteredEntity interface {
GetId() string
GetSearchFields() *pb.SearchFields
GetCreateTime() *timestamp.Timestamp
}
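// Both *pb.Ticket and *pb.Backfill satisfy filteredEntity, so a single
// PoolFilter can filter tickets and backfills alike.
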
// In returns true if the Ticket meets all the criteria for this PoolFilter.
func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
s := ticket.GetSearchFields()
func (pf *PoolFilter) In(entity filteredEntity) bool {
s := entity.GetSearchFields()
if s == nil {
s = emptySearchFields
}
if !pf.CreatedAfter.IsZero() || !pf.CreatedBefore.IsZero() {
// CreateTime is only populated by Open Match and hence expected to be valid.
if ct, err := ptypes.Timestamp(ticket.CreateTime); err == nil {
if ct, err := ptypes.Timestamp(entity.GetCreateTime()); err == nil {
if !pf.CreatedAfter.IsZero() {
if !ct.After(pf.CreatedAfter) {
return false
@ -96,7 +104,7 @@ func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
} else {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": ticket.GetId(),
"id": entity.GetId(),
}).Error("failed to get time from Timestamp proto")
}
}
@ -106,10 +114,27 @@ func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
if !ok {
return false
}
// Not simplified so that NaN cases are handled correctly.
if !(v >= f.Min && v <= f.Max) {
return false
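// Exclude controls which bounds of [Min, Max] are open:
//   NONE: Min <= v <= Max
//   MIN:  Min <  v <= Max
//   MAX:  Min <= v <  Max
//   BOTH: Min <  v <  Max
// NaN in the value or in either bound makes every comparison false, so
// such entities are excluded in every mode.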
switch f.Exclude {
case pb.DoubleRangeFilter_NONE:
// Not simplified so that NaN cases are handled correctly.
if !(v >= f.Min && v <= f.Max) {
return false
}
case pb.DoubleRangeFilter_MIN:
if !(v > f.Min && v <= f.Max) {
return false
}
case pb.DoubleRangeFilter_MAX:
if !(v >= f.Min && v < f.Max) {
return false
}
case pb.DoubleRangeFilter_BOTH:
if !(v > f.Min && v < f.Max) {
return false
}
}
}
for _, f := range pf.StringEqualsFilters {


@ -19,7 +19,7 @@ import (
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/filter/testcases"
@ -27,31 +27,53 @@ import (
)
func TestMeetsCriteria(t *testing.T) {
testInclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
pf, err := NewPoolFilter(pool)
require.NoError(t, err)
require.NotNil(t, pf)
if !pf.In(entity) {
t.Error("entity should be included in the pool")
}
}
for _, tc := range testcases.IncludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.Pool)
if err != nil {
t.Error("pool should be valid")
}
tc.Ticket.CreateTime = ptypes.TimestampNow()
if !pf.In(tc.Ticket) {
t.Error("ticket should be included in the pool")
}
testInclusion(t, tc.Pool, &pb.Ticket{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
testInclusion(t, tc.Pool, &pb.Backfill{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
})
}
testExclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
pf, err := NewPoolFilter(pool)
require.NoError(t, err)
require.NotNil(t, pf)
if pf.In(entity) {
t.Error("ticket should be excluded from the pool")
}
}
for _, tc := range testcases.ExcludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.Pool)
if err != nil {
t.Error("pool should be valid")
}
tc.Ticket.CreateTime = ptypes.TimestampNow()
if pf.In(tc.Ticket) {
t.Error("ticket should be excluded from the pool")
}
testExclusion(t, tc.Pool, &pb.Ticket{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
testExclusion(t, tc.Pool, &pb.Backfill{
SearchFields: tc.SearchFields,
CreateTime: ptypes.TimestampNow(),
})
})
}
}
@ -83,10 +105,13 @@ func TestValidPoolFilter(t *testing.T) {
tc := tc
t.Run(tc.name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.pool)
assert.Nil(t, pf)
require.Error(t, err)
require.Nil(t, pf)
s := status.Convert(err)
assert.Equal(t, tc.code, s.Code())
assert.Equal(t, tc.msg, s.Message())
require.Equal(t, tc.code, s.Code())
require.Equal(t, tc.msg, s.Message())
})
}
}


@ -27,9 +27,9 @@ import (
// TestCase defines a single filtering test case to run.
type TestCase struct {
Name string
Ticket *pb.Ticket
Pool *pb.Pool
Name string
SearchFields *pb.SearchFields
Pool *pb.Pool
}
// IncludedTestCases returns a list of test cases where using the given filter,
@ -39,22 +39,38 @@ func IncludedTestCases() []TestCase {
return []TestCase{
{
"no filters or fields",
&pb.Ticket{},
nil,
&pb.Pool{},
},
simpleDoubleRange("simpleInRange", 5, 0, 10),
simpleDoubleRange("exactMatch", 5, 5, 5),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1)),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("excludeNone", 0, 0, 1, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("excludeNone", 1, 0, 1, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("excludeMin", 1, 0, 1, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("excludeMax", 0, 0, 1, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("excludeBoth", 2, 0, 3, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("excludeBoth", 1, 0, 3, pb.DoubleRangeFilter_BOTH),
{
"String equals simple positive",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
&pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
&pb.Pool{
@ -69,11 +85,9 @@ func IncludedTestCases() []TestCase {
{
"TagPresent simple positive",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"mytag",
},
&pb.SearchFields{
Tags: []string{
"mytag",
},
},
&pb.Pool{
@ -87,11 +101,9 @@ func IncludedTestCases() []TestCase {
{
"TagPresent multiple all present",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
&pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
&pb.Pool{
@ -113,21 +125,21 @@ func IncludedTestCases() []TestCase {
{
"CreatedBefore simple positive",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
},
},
{
"CreatedAfter simple positive",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
},
},
{
"Between CreatedBefore and CreatedAfter positive",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
@ -135,7 +147,7 @@ func IncludedTestCases() []TestCase {
},
{
"No time search criteria positive",
&pb.Ticket{},
nil,
&pb.Pool{},
},
}
@ -148,7 +160,7 @@ func ExcludedTestCases() []TestCase {
return []TestCase{
{
"DoubleRange no SearchFields",
&pb.Ticket{},
nil,
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
@ -161,7 +173,7 @@ func ExcludedTestCases() []TestCase {
},
{
"StringEquals no SearchFields",
&pb.Ticket{},
nil,
&pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
@ -173,7 +185,7 @@ func ExcludedTestCases() []TestCase {
},
{
"TagPresent no SearchFields",
&pb.Ticket{},
nil,
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
@ -182,14 +194,11 @@ func ExcludedTestCases() []TestCase {
},
},
},
{
"double range missing field",
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"otherfield": 0,
},
&pb.SearchFields{
DoubleArgs: map[string]float64{
"otherfield": 0,
},
},
&pb.Pool{
@ -203,22 +212,66 @@ func ExcludedTestCases() []TestCase {
},
},
simpleDoubleRange("valueTooLow", -1, 0, 10),
simpleDoubleRange("valueTooHigh", 11, 0, 10),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN()),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN()),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1)),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN()),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsMax", 1, 0, 1, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsMin", 0, 0, 1, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("excludeBoth", 0, 0, 1, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("excludeBoth", 1, 0, 1, pb.DoubleRangeFilter_BOTH),
{
"String equals simple negative", // and case sensitivity
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
&pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
&pb.Pool{
@ -233,11 +286,9 @@ func ExcludedTestCases() []TestCase {
{
"String equals missing field",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"otherfield": "othervalue",
},
&pb.SearchFields{
StringArgs: map[string]string{
"otherfield": "othervalue",
},
},
&pb.Pool{
@ -252,11 +303,9 @@ func ExcludedTestCases() []TestCase {
{
"TagPresent simple negative", // and case sensitivity
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"MYTAG",
},
&pb.SearchFields{
Tags: []string{
"MYTAG",
},
},
&pb.Pool{
@ -270,11 +319,9 @@ func ExcludedTestCases() []TestCase {
{
"TagPresent multiple with one missing",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
&pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
&pb.Pool{
@ -294,21 +341,21 @@ func ExcludedTestCases() []TestCase {
{
"CreatedBefore simple negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
},
},
{
"CreatedAfter simple negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
},
},
{
"Created before time range negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 2)),
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
@ -316,7 +363,7 @@ func ExcludedTestCases() []TestCase {
},
{
"Created after time range negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
CreatedAfter: timestamp(now.Add(time.Hour * -2)),
@ -329,14 +376,12 @@ func ExcludedTestCases() []TestCase {
}
}
func simpleDoubleRange(name string, value, min, max float64) TestCase {
func simpleDoubleRange(name string, value, min, max float64, exclude pb.DoubleRangeFilter_Exclude) TestCase {
return TestCase{
"double range " + name,
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"field": value,
},
&pb.SearchFields{
DoubleArgs: map[string]float64{
"field": value,
},
},
&pb.Pool{
@ -345,6 +390,7 @@ func simpleDoubleRange(name string, value, min, max float64) TestCase {
DoubleArg: "field",
Min: min,
Max: max,
Exclude: exclude,
},
},
},
@ -369,16 +415,14 @@ func multipleFilters(doubleRange, stringEquals, tagPresent bool) TestCase {
return TestCase{
fmt.Sprintf("multiplefilters: %v, %v, %v", doubleRange, stringEquals, tagPresent),
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"a": a,
},
StringArgs: map[string]string{
"b": b,
},
Tags: []string{c},
&pb.SearchFields{
DoubleArgs: map[string]float64{
"a": a,
},
StringArgs: map[string]string{
"b": b,
},
Tags: []string{c},
},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{

internal/ipb/messages.pb.go Normal file

@ -0,0 +1,182 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.10.1
// source: internal/api/messages.proto
package ipb
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
pb "open-match.dev/open-match/pkg/pb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type BackfillInternal struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Represents a backfill entity which is used to fill partially full matches
Backfill *pb.Backfill `protobuf:"bytes,1,opt,name=backfill,proto3" json:"backfill,omitempty"`
// List of ticket IDs associated with a current backfill
TicketIds []string `protobuf:"bytes,2,rep,name=ticket_ids,json=ticketIds,proto3" json:"ticket_ids,omitempty"`
}
func (x *BackfillInternal) Reset() {
*x = BackfillInternal{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_messages_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *BackfillInternal) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BackfillInternal) ProtoMessage() {}
func (x *BackfillInternal) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_messages_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BackfillInternal.ProtoReflect.Descriptor instead.
func (*BackfillInternal) Descriptor() ([]byte, []int) {
return file_internal_api_messages_proto_rawDescGZIP(), []int{0}
}
func (x *BackfillInternal) GetBackfill() *pb.Backfill {
if x != nil {
return x.Backfill
}
return nil
}
func (x *BackfillInternal) GetTicketIds() []string {
if x != nil {
return x.TicketIds
}
return nil
}
var File_internal_api_messages_proto protoreflect.FileDescriptor
var file_internal_api_messages_proto_rawDesc = []byte{
0x0a, 0x1b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x6f,
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x62, 0x0a, 0x10, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c,
0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x2f, 0x0a, 0x08, 0x62, 0x61, 0x63,
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70,
0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c,
0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69,
0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x73, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_internal_api_messages_proto_rawDescOnce sync.Once
file_internal_api_messages_proto_rawDescData = file_internal_api_messages_proto_rawDesc
)
func file_internal_api_messages_proto_rawDescGZIP() []byte {
file_internal_api_messages_proto_rawDescOnce.Do(func() {
file_internal_api_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_messages_proto_rawDescData)
})
return file_internal_api_messages_proto_rawDescData
}
var file_internal_api_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_internal_api_messages_proto_goTypes = []interface{}{
(*BackfillInternal)(nil), // 0: openmatch.internal.BackfillInternal
(*pb.Backfill)(nil), // 1: openmatch.Backfill
}
var file_internal_api_messages_proto_depIdxs = []int32{
1, // 0: openmatch.internal.BackfillInternal.backfill:type_name -> openmatch.Backfill
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_internal_api_messages_proto_init() }
func file_internal_api_messages_proto_init() {
if File_internal_api_messages_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_internal_api_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BackfillInternal); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_api_messages_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_internal_api_messages_proto_goTypes,
DependencyIndexes: file_internal_api_messages_proto_depIdxs,
MessageInfos: file_internal_api_messages_proto_msgTypes,
}.Build()
File_internal_api_messages_proto = out.File
file_internal_api_messages_proto_rawDesc = nil
file_internal_api_messages_proto_goTypes = nil
file_internal_api_messages_proto_depIdxs = nil
}


@ -1,71 +1,102 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.10.1
// source: internal/api/synchronizer.proto
package ipb
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
pb "open-match.dev/open-match/pkg/pb"
reflect "reflect"
sync "sync"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type SynchronizeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// A match returned by an mmf.
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
}
func (m *SynchronizeRequest) Reset() { *m = SynchronizeRequest{} }
func (m *SynchronizeRequest) String() string { return proto.CompactTextString(m) }
func (*SynchronizeRequest) ProtoMessage() {}
func (x *SynchronizeRequest) Reset() {
*x = SynchronizeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SynchronizeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SynchronizeRequest) ProtoMessage() {}
func (x *SynchronizeRequest) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SynchronizeRequest.ProtoReflect.Descriptor instead.
func (*SynchronizeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_35ff6b85fea1c4b7, []int{0}
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{0}
}
func (m *SynchronizeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SynchronizeRequest.Unmarshal(m, b)
}
func (m *SynchronizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SynchronizeRequest.Marshal(b, m, deterministic)
}
func (m *SynchronizeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SynchronizeRequest.Merge(m, src)
}
func (m *SynchronizeRequest) XXX_Size() int {
return xxx_messageInfo_SynchronizeRequest.Size(m)
}
func (m *SynchronizeRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SynchronizeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SynchronizeRequest proto.InternalMessageInfo
func (m *SynchronizeRequest) GetProposal() *pb.Match {
if m != nil {
return m.Proposal
func (x *SynchronizeRequest) GetProposal() *pb.Match {
if x != nil {
return x.Proposal
}
return nil
}
type SynchronizeResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Instructs the backend call that it can start running the mmfs.
StartMmfs bool `protobuf:"varint,1,opt,name=start_mmfs,json=startMmfs,proto3" json:"start_mmfs,omitempty"`
// Instructs the backend call that it should cancel any RPC calls to the mmfs,
@ -73,93 +104,181 @@ type SynchronizeResponse struct {
CancelMmfs bool `protobuf:"varint,2,opt,name=cancel_mmfs,json=cancelMmfs,proto3" json:"cancel_mmfs,omitempty"`
// A match ID returned by the evaluator and should be returned to the FetchMatches
// caller.
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
}
func (m *SynchronizeResponse) Reset() { *m = SynchronizeResponse{} }
func (m *SynchronizeResponse) String() string { return proto.CompactTextString(m) }
func (*SynchronizeResponse) ProtoMessage() {}
func (x *SynchronizeResponse) Reset() {
*x = SynchronizeResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SynchronizeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SynchronizeResponse) ProtoMessage() {}
func (x *SynchronizeResponse) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SynchronizeResponse.ProtoReflect.Descriptor instead.
func (*SynchronizeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_35ff6b85fea1c4b7, []int{1}
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{1}
}
func (m *SynchronizeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SynchronizeResponse.Unmarshal(m, b)
}
func (m *SynchronizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SynchronizeResponse.Marshal(b, m, deterministic)
}
func (m *SynchronizeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SynchronizeResponse.Merge(m, src)
}
func (m *SynchronizeResponse) XXX_Size() int {
return xxx_messageInfo_SynchronizeResponse.Size(m)
}
func (m *SynchronizeResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SynchronizeResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SynchronizeResponse proto.InternalMessageInfo
func (m *SynchronizeResponse) GetStartMmfs() bool {
if m != nil {
return m.StartMmfs
func (x *SynchronizeResponse) GetStartMmfs() bool {
if x != nil {
return x.StartMmfs
}
return false
}
func (m *SynchronizeResponse) GetCancelMmfs() bool {
if m != nil {
return m.CancelMmfs
func (x *SynchronizeResponse) GetCancelMmfs() bool {
if x != nil {
return x.CancelMmfs
}
return false
}
func (m *SynchronizeResponse) GetMatchId() string {
if m != nil {
return m.MatchId
func (x *SynchronizeResponse) GetMatchId() string {
if x != nil {
return x.MatchId
}
return ""
}
func init() {
proto.RegisterType((*SynchronizeRequest)(nil), "openmatch.internal.SynchronizeRequest")
proto.RegisterType((*SynchronizeResponse)(nil), "openmatch.internal.SynchronizeResponse")
var File_internal_api_synchronizer_proto protoreflect.FileDescriptor
var file_internal_api_synchronizer_proto_rawDesc = []byte{
0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73,
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x12, 0x53, 0x79, 0x6e,
0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61,
0x74, 0x63, 0x68, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x22, 0x76, 0x0a,
0x13, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6d, 0x6d,
0x66, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4d,
0x6d, 0x66, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x6d, 0x6d,
0x66, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c,
0x4d, 0x6d, 0x66, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x4a,
0x04, 0x08, 0x03, 0x10, 0x04, 0x32, 0x72, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
0x6e, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
0x6e, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6f,
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
func init() { proto.RegisterFile("internal/api/synchronizer.proto", fileDescriptor_35ff6b85fea1c4b7) }
var (
file_internal_api_synchronizer_proto_rawDescOnce sync.Once
file_internal_api_synchronizer_proto_rawDescData = file_internal_api_synchronizer_proto_rawDesc
)
var fileDescriptor_35ff6b85fea1c4b7 = []byte{
// 263 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
0x10, 0xc5, 0x89, 0x16, 0x4d, 0x27, 0x1e, 0xca, 0x7a, 0xa9, 0x05, 0x69, 0xe9, 0xa1, 0xe6, 0xa0,
0x1b, 0xa9, 0xdf, 0xa0, 0x37, 0x85, 0x5e, 0xe2, 0xcd, 0x4b, 0xd9, 0x24, 0x53, 0xbb, 0x90, 0xfd,
0xe3, 0xce, 0x5a, 0xd0, 0x4f, 0x2f, 0xd9, 0xc5, 0xa6, 0xd2, 0x83, 0x97, 0x85, 0x37, 0xf3, 0xdb,
0x37, 0xcc, 0x1b, 0x98, 0x4a, 0xed, 0xd1, 0x69, 0xd1, 0x16, 0xc2, 0xca, 0x82, 0xbe, 0x74, 0xbd,
0x73, 0x46, 0xcb, 0x6f, 0x74, 0xdc, 0x3a, 0xe3, 0x0d, 0x63, 0xc6, 0xa2, 0x56, 0xc2, 0xd7, 0x3b,
0xfe, 0x8b, 0x4e, 0x58, 0xc7, 0x2a, 0x24, 0x12, 0xef, 0x48, 0x91, 0x9b, 0xaf, 0x80, 0xbd, 0xf6,
0xbf, 0x4b, 0xfc, 0xf8, 0x44, 0xf2, 0xec, 0x1e, 0x52, 0xeb, 0x8c, 0x35, 0x24, 0xda, 0x71, 0x32,
0x4b, 0xf2, 0x6c, 0x39, 0xe2, 0xbd, 0xe1, 0xba, 0x7b, 0xcb, 0x03, 0x31, 0xdf, 0xc3, 0xf5, 0x1f,
0x0f, 0xb2, 0x46, 0x13, 0xb2, 0x5b, 0x00, 0xf2, 0xc2, 0xf9, 0x8d, 0x52, 0x5b, 0x0a, 0x36, 0x69,
0x39, 0x0c, 0x95, 0xb5, 0xda, 0x12, 0x9b, 0x42, 0x56, 0x0b, 0x5d, 0x63, 0x1b, 0xfb, 0x67, 0xa1,
0x0f, 0xb1, 0x14, 0x80, 0x1b, 0x48, 0xc3, 0xbc, 0x8d, 0x6c, 0xc6, 0x83, 0x59, 0x92, 0x0f, 0xcb,
0xcb, 0xa0, 0x9f, 0x9b, 0x97, 0x41, 0x7a, 0x3e, 0x1a, 0x2c, 0x1d, 0x5c, 0x1d, 0xcd, 0x75, 0xac,
0x82, 0xec, 0x48, 0xb3, 0x05, 0x3f, 0xcd, 0x80, 0x9f, 0x2e, 0x3b, 0xb9, 0xfb, 0x97, 0x8b, 0x0b,
0xe5, 0xc9, 0x63, 0xb2, 0xca, 0xdf, 0x16, 0x1d, 0xfd, 0x10, 0xf1, 0x06, 0xf7, 0x45, 0x2f, 0x8b,
0xc3, 0x51, 0xa4, 0xad, 0xaa, 0x8b, 0x10, 0xf0, 0xd3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84,
0xba, 0xf6, 0x41, 0xab, 0x01, 0x00, 0x00,
func file_internal_api_synchronizer_proto_rawDescGZIP() []byte {
file_internal_api_synchronizer_proto_rawDescOnce.Do(func() {
file_internal_api_synchronizer_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_synchronizer_proto_rawDescData)
})
return file_internal_api_synchronizer_proto_rawDescData
}
var file_internal_api_synchronizer_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_internal_api_synchronizer_proto_goTypes = []interface{}{
(*SynchronizeRequest)(nil), // 0: openmatch.internal.SynchronizeRequest
(*SynchronizeResponse)(nil), // 1: openmatch.internal.SynchronizeResponse
(*pb.Match)(nil), // 2: openmatch.Match
}
var file_internal_api_synchronizer_proto_depIdxs = []int32{
2, // 0: openmatch.internal.SynchronizeRequest.proposal:type_name -> openmatch.Match
0, // 1: openmatch.internal.Synchronizer.Synchronize:input_type -> openmatch.internal.SynchronizeRequest
1, // 2: openmatch.internal.Synchronizer.Synchronize:output_type -> openmatch.internal.SynchronizeResponse
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_internal_api_synchronizer_proto_init() }
func file_internal_api_synchronizer_proto_init() {
if File_internal_api_synchronizer_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_internal_api_synchronizer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_api_synchronizer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_api_synchronizer_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_internal_api_synchronizer_proto_goTypes,
DependencyIndexes: file_internal_api_synchronizer_proto_depIdxs,
MessageInfos: file_internal_api_synchronizer_proto_msgTypes,
}.Build()
File_internal_api_synchronizer_proto = out.File
file_internal_api_synchronizer_proto_rawDesc = nil
file_internal_api_synchronizer_proto_goTypes = nil
file_internal_api_synchronizer_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
const _ = grpc.SupportPackageIsVersion6
// SynchronizerClient is the client API for Synchronizer service.
//
@ -171,10 +290,10 @@ type SynchronizerClient interface {
}
type synchronizerClient struct {
cc *grpc.ClientConn
cc grpc.ClientConnInterface
}
func NewSynchronizerClient(cc *grpc.ClientConn) SynchronizerClient {
func NewSynchronizerClient(cc grpc.ClientConnInterface) SynchronizerClient {
return &synchronizerClient{cc}
}
@ -220,7 +339,7 @@ type SynchronizerServer interface {
type UnimplementedSynchronizerServer struct {
}
func (*UnimplementedSynchronizerServer) Synchronize(srv Synchronizer_SynchronizeServer) error {
func (*UnimplementedSynchronizerServer) Synchronize(Synchronizer_SynchronizeServer) error {
return status.Errorf(codes.Unimplemented, "method Synchronize not implemented")
}

View File

@ -21,7 +21,7 @@ import (
stackdriver "github.com/TV4/logrus-stackdriver-formatter"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewFormatter(t *testing.T) {
@ -37,9 +37,9 @@ func TestNewFormatter(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("newFormatter(%s) => %s", tc.in, tc.expected), func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
actual := newFormatter(tc.in)
assert.Equal(reflect.TypeOf(tc.expected), reflect.TypeOf(actual))
require.Equal(reflect.TypeOf(tc.expected), reflect.TypeOf(actual))
})
}
}
@ -60,9 +60,9 @@ func TestIsDebugLevel(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("isDebugLevel(%s) => %t", tc.in, tc.expected), func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
actual := isDebugLevel(tc.in)
assert.Equal(tc.expected, actual)
require.Equal(tc.expected, actual)
})
}
}
@ -87,9 +87,9 @@ func TestToLevel(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("toLevel(%s) => %s", tc.in, tc.expected), func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
actual := toLevel(tc.in)
assert.Equal(tc.expected, actual)
require.Equal(tc.expected, actual)
})
}
}

View File

@ -18,7 +18,7 @@ import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
@ -27,31 +27,31 @@ const (
)
func TestGetGRPC(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
cc := NewClientCache(viper.New())
client, err := cc.GetGRPC(fakeGRPCAddress)
assert.Nil(err)
require.Nil(err)
cachedClient, err := cc.GetGRPC(fakeGRPCAddress)
assert.Nil(err)
require.Nil(err)
// Test caching by comparing pointer value
assert.EqualValues(client, cachedClient)
require.EqualValues(client, cachedClient)
}
func TestGetHTTP(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
cc := NewClientCache(viper.New())
client, address, err := cc.GetHTTP(fakeHTTPAddress)
assert.Nil(err)
assert.Equal(fakeHTTPAddress, address)
require.Nil(err)
require.Equal(fakeHTTPAddress, address)
cachedClient, address, err := cc.GetHTTP(fakeHTTPAddress)
assert.Nil(err)
assert.Equal(fakeHTTPAddress, address)
require.Nil(err)
require.Equal(fakeHTTPAddress, address)
// Test caching by comparing pointer value
assert.EqualValues(client, cachedClient)
require.EqualValues(client, cachedClient)
}

View File

@ -22,8 +22,11 @@ import (
"os"
"testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/telemetry"
@ -34,39 +37,48 @@ import (
)
func TestSecureGRPCFromConfig(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, true)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
defer closer()
runGrpcClientTests(t, assert, cfg, rpcParams)
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
}
func TestInsecureGRPCFromConfig(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, false)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
defer closer()
runGrpcClientTests(t, assert, cfg, rpcParams)
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
}
func TestUnavailableGRPCFromConfig(t *testing.T) {
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "badhost")
defer closer()
runFailureGrpcClientTests(t, require, cfg, rpcParams, codes.Unavailable)
}
func TestHTTPSFromConfig(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, true)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
defer closer()
runHTTPClientTests(assert, cfg, rpcParams)
runHTTPClientTests(require, cfg, rpcParams)
}
func TestInsecureHTTPFromConfig(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(assert, false)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
defer closer()
runHTTPClientTests(assert, cfg, rpcParams)
runHTTPClientTests(require, cfg, rpcParams)
}
func TestSanitizeHTTPAddress(t *testing.T) {
@ -88,15 +100,15 @@ func TestSanitizeHTTPAddress(t *testing.T) {
tc := testCase
description := fmt.Sprintf("sanitizeHTTPAddress(%s, %t) => (%s, %v)", tc.address, tc.preferHTTPS, tc.expected, tc.err)
t.Run(description, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
actual, err := sanitizeHTTPAddress(tc.address, tc.preferHTTPS)
assert.Equal(tc.expected, actual)
assert.Equal(tc.err, err)
require.Equal(tc.expected, actual)
require.Equal(tc.err, err)
})
}
}
func runGrpcClientTests(t *testing.T, assert *assert.Assertions, cfg config.View, rpcParams *ServerParams) {
func setupClientConnection(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) *grpc.ClientConn {
// Serve a fake frontend server and wait for its full start up
ff := &shellTesting.FakeFrontend{}
rpcParams.AddHandleFunc(func(s *grpc.Server) {
@ -104,24 +116,45 @@ func runGrpcClientTests(t *testing.T, assert *assert.Assertions, cfg config.View
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
s := &Server{}
defer s.Stop()
t.Cleanup(func() {
defer s.Stop()
})
err := s.Start(rpcParams)
assert.Nil(err)
require.Nil(err)
// Acquire grpc client
grpcConn, err := GRPCClientFromConfig(cfg, "test")
assert.Nil(err)
assert.NotNil(grpcConn)
require.Nil(err)
require.NotNil(grpcConn)
return grpcConn
}
func runSuccessGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
// Confirm the client works as expected
ctx := utilTesting.NewContext(t)
feClient := pb.NewFrontendServiceClient(grpcConn)
grpcResp, err := feClient.CreateTicket(ctx, &pb.CreateTicketRequest{})
assert.Nil(err)
assert.NotNil(grpcResp)
require.Nil(err)
require.NotNil(grpcResp)
}
func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *ServerParams) {
func runFailureGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams, expectedCode codes.Code) {
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
// Confirm the client fails as expected
ctx := utilTesting.NewContext(t)
feClient := pb.NewFrontendServiceClient(grpcConn)
grpcResp, err := feClient.CreateTicket(ctx, &pb.CreateTicketRequest{})
require.Error(err)
require.Nil(grpcResp)
code := status.Code(err)
require.Equal(expectedCode, code)
}
func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
// Serve a fake frontend server and wait for its full start up
ff := &shellTesting.FakeFrontend{}
rpcParams.AddHandleFunc(func(s *grpc.Server) {
@ -130,20 +163,20 @@ func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *S
s := &Server{}
defer s.Stop()
err := s.Start(rpcParams)
assert.Nil(err)
require.Nil(err)
// Acquire http client
httpClient, baseURL, err := HTTPClientFromConfig(cfg, "test")
assert.Nil(err)
require.Nil(err)
// Confirm the client works as expected
httpReq, err := http.NewRequest(http.MethodGet, baseURL+telemetry.HealthCheckEndpoint, nil)
assert.Nil(err)
assert.NotNil(httpReq)
require.Nil(err)
require.NotNil(httpReq)
httpResp, err := httpClient.Do(httpReq)
assert.Nil(err)
assert.NotNil(httpResp)
require.Nil(err)
require.NotNil(httpResp)
defer func() {
if httpResp != nil {
httpResp.Body.Close()
@ -151,13 +184,13 @@ func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *S
}()
body, err := ioutil.ReadAll(httpResp.Body)
assert.Nil(err)
assert.Equal(200, httpResp.StatusCode)
assert.Equal("ok", string(body))
require.Nil(err)
require.Equal(200, httpResp.StatusCode)
require.Equal("ok", string(body))
}
// Generate a config view and optional TLS key manifests for testing
func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool) (config.View, *ServerParams, func()) {
func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions, tlsEnabled bool, host string) (config.View, *ServerParams, func()) {
// Create netlisteners on random ports used for rpc serving
grpcL := MustListen()
httpL := MustListen()
@ -165,13 +198,13 @@ func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool
// Generate a config view with paths to the manifests
cfg := viper.New()
cfg.Set("test.hostname", "localhost")
cfg.Set("test.hostname", host)
cfg.Set("test.grpcport", MustGetPortNumber(grpcL))
cfg.Set("test.httpport", MustGetPortNumber(httpL))
// Create temporary TLS key files for testing
pubFile, err := ioutil.TempFile("", "pub*")
assert.Nil(err)
require.Nil(err)
if tlsEnabled {
// Generate public and private key bytes
@ -179,11 +212,11 @@ func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool
fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL)),
fmt.Sprintf("localhost:%s", MustGetPortNumber(httpL)),
})
assert.Nil(err)
require.Nil(err)
// Write certgen key bytes to the temp files
err = ioutil.WriteFile(pubFile.Name(), pubBytes, 0400)
assert.Nil(err)
require.Nil(err)
// Generate a config view with paths to the manifests
cfg.Set(configNameClientTrustedCertificatePath, pubFile.Name())
@ -191,7 +224,7 @@ func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool
rpcParams.SetTLSConfiguration(pubBytes, pubBytes, priBytes)
}
return cfg, rpcParams, func() { removeTempFile(assert, pubFile.Name()) }
return cfg, rpcParams, func() { removeTempFile(t, pubFile.Name()) }
}
func MustListen() net.Listener {
@ -210,9 +243,11 @@ func MustGetPortNumber(l net.Listener) string {
return port
}
func removeTempFile(assert *assert.Assertions, paths ...string) {
func removeTempFile(t *testing.T, paths ...string) {
for _, path := range paths {
err := os.Remove(path)
assert.Nil(err)
if err != nil {
t.Errorf("Can not remove the temporary file: %s, err: %s", path, err.Error())
}
}
}

View File

@ -22,13 +22,13 @@ import (
"open-match.dev/open-match/pkg/pb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
shellTesting "open-match.dev/open-match/internal/testing"
)
func TestInsecureStartStop(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
grpcL := MustListen()
httpL := MustListen()
ff := &shellTesting.FakeFrontend{}
@ -40,15 +40,15 @@ func TestInsecureStartStop(t *testing.T) {
s := newInsecureServer(grpcL, httpL)
defer s.stop()
err := s.start(params)
assert.Nil(err)
require.Nil(err)
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
assert.Nil(err)
require.Nil(err)
defer conn.Close()
endpoint := fmt.Sprintf("http://localhost:%s", MustGetPortNumber(httpL))
httpClient := &http.Client{
Timeout: time.Second,
}
runGrpcWithProxyTests(t, assert, s, conn, httpClient, endpoint)
runGrpcWithProxyTests(t, require, s, conn, httpClient, endpoint)
}

View File

@ -87,6 +87,11 @@ type ServerParams struct {
// NewServerParamsFromConfig returns server Params initialized from the configuration file.
func NewServerParamsFromConfig(cfg config.View, prefix string, listen func(network, address string) (net.Listener, error)) (*ServerParams, error) {
serverLogger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": prefix,
})
grpcL, err := listen("tcp", fmt.Sprintf(":%d", cfg.GetInt(prefix+".grpcport")))
if err != nil {
return nil, errors.Wrap(err, "can't start listener for grpc")
@ -283,6 +288,9 @@ func newGRPCServerOptions(params *ServerParams) []grpc.ServerOption {
}
}
ui = append(ui, serverUnaryInterceptor)
si = append(si, serverStreamInterceptor)
if params.enableMetrics {
opts = append(opts, grpc.StatsHandler(&ocgrpc.ServerHandler{}))
}
@ -297,3 +305,25 @@ func newGRPCServerOptions(params *ServerParams) []grpc.ServerOption {
},
))
}
func serverStreamInterceptor(srv interface{},
stream grpc.ServerStream,
info *grpc.StreamServerInfo,
handler grpc.StreamHandler) error {
err := handler(srv, stream)
if err != nil {
serverLogger.Error(err)
}
return err
}
func serverUnaryInterceptor(ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler) (interface{}, error) {
h, err := handler(ctx, req)
if err != nil {
serverLogger.Error(err)
}
return h, err
}
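For orientation, the two interceptors above follow grpc-go's standard server interceptor signatures and are registered through the ui/si slices appended earlier in newGRPCServerOptions. A minimal, self-contained sketch of the same wiring, assuming a recent grpc-go (ChainUnaryInterceptor was added in v1.28); the logUnary name is illustrative, not part of the change:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logUnary mirrors serverUnaryInterceptor: invoke the handler,
// log any error, and return the result unchanged.
func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	resp, err := handler(ctx, req)
	if err != nil {
		log.Printf("unary %s failed: %v", info.FullMethod, err)
	}
	return resp, err
}

func main() {
	// Interceptors are passed as server options; grpc-go chains them in order.
	s := grpc.NewServer(grpc.ChainUnaryInterceptor(logUnary))
	_ = s
}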

View File

@ -22,7 +22,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/telemetry"
shellTesting "open-match.dev/open-match/internal/testing"
@ -31,7 +31,7 @@ import (
)
func TestStartStopServer(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
grpcL := MustListen()
httpL := MustListen()
ff := &shellTesting.FakeFrontend{}
@ -44,57 +44,57 @@ func TestStartStopServer(t *testing.T) {
defer s.Stop()
err := s.Start(params)
assert.Nil(err)
require.Nil(err)
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
assert.Nil(err)
require.Nil(err)
endpoint := fmt.Sprintf("http://localhost:%s", MustGetPortNumber(httpL))
httpClient := &http.Client{
Timeout: time.Second,
}
runGrpcWithProxyTests(t, assert, s.serverWithProxy, conn, httpClient, endpoint)
runGrpcWithProxyTests(t, require, s.serverWithProxy, conn, httpClient, endpoint)
}
func runGrpcWithProxyTests(t *testing.T, assert *assert.Assertions, s grpcServerWithProxy, conn *grpc.ClientConn, httpClient *http.Client, endpoint string) {
func runGrpcWithProxyTests(t *testing.T, require *require.Assertions, s grpcServerWithProxy, conn *grpc.ClientConn, httpClient *http.Client, endpoint string) {
ctx := utilTesting.NewContext(t)
feClient := pb.NewFrontendServiceClient(conn)
grpcResp, err := feClient.CreateTicket(ctx, &pb.CreateTicketRequest{})
assert.Nil(err)
assert.NotNil(grpcResp)
require.Nil(err)
require.NotNil(grpcResp)
httpReq, err := http.NewRequest(http.MethodPost, endpoint+"/v1/frontendservice/tickets", strings.NewReader("{}"))
assert.Nil(err)
assert.NotNil(httpReq)
require.Nil(err)
require.NotNil(httpReq)
httpResp, err := httpClient.Do(httpReq)
assert.Nil(err)
assert.NotNil(httpResp)
require.Nil(err)
require.NotNil(httpResp)
defer func() {
if httpResp != nil {
httpResp.Body.Close()
}
}()
body, err := ioutil.ReadAll(httpResp.Body)
assert.Nil(err)
assert.Equal(200, httpResp.StatusCode)
assert.Equal("{}", string(body))
require.Nil(err)
require.Equal(200, httpResp.StatusCode)
require.Equal("{}", string(body))
httpReq, err = http.NewRequest(http.MethodGet, endpoint+telemetry.HealthCheckEndpoint, nil)
assert.Nil(err)
require.Nil(err)
httpResp, err = httpClient.Do(httpReq)
assert.Nil(err)
assert.NotNil(httpResp)
require.Nil(err)
require.NotNil(httpResp)
defer func() {
if httpResp != nil {
httpResp.Body.Close()
}
}()
body, err = ioutil.ReadAll(httpResp.Body)
assert.Nil(err)
assert.Equal(200, httpResp.StatusCode)
assert.Equal("ok", string(body))
require.Nil(err)
require.Equal(200, httpResp.StatusCode)
require.Equal("ok", string(body))
s.stop()
}

View File

@ -22,7 +22,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
shellTesting "open-match.dev/open-match/internal/testing"
@ -32,14 +32,14 @@ import (
// TestStartStopTlsServerWithSingleCertificate verifies that we can have a gRPC+TLS+HTTPS server/client work with a single self-signed certificate.
func TestStartStopTlsServerWithSingleCertificate(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
grpcL := MustListen()
proxyL := MustListen()
grpcAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL))
proxyAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(proxyL))
allHostnames := []string{grpcAddress, proxyAddress}
pub, priv, err := certgenTesting.CreateCertificateAndPrivateKeyForTesting(allHostnames)
assert.Nil(err)
require.Nil(err)
runTestStartStopTLSServer(t, &tlsServerTestParams{
rootPublicCertificateFileData: pub,
rootPrivateKeyFileData: priv,
@ -54,17 +54,17 @@ func TestStartStopTlsServerWithSingleCertificate(t *testing.T) {
// TestStartStopTlsServerWithCARootedCertificate verifies that we can have a gRPC+TLS+HTTPS server/client work with a self-signed CA-rooted certificate.
func TestStartStopTlsServerWithCARootedCertificate(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
grpcL := MustListen()
proxyL := MustListen()
grpcAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL))
proxyAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(proxyL))
allHostnames := []string{grpcAddress, proxyAddress}
rootPub, rootPriv, err := certgenTesting.CreateRootCertificateAndPrivateKeyForTesting(allHostnames)
assert.Nil(err)
require.Nil(err)
pub, priv, err := certgenTesting.CreateDerivedCertificateAndPrivateKeyForTesting(rootPub, rootPriv, allHostnames)
assert.Nil(err)
require.Nil(err)
runTestStartStopTLSServer(t, &tlsServerTestParams{
rootPublicCertificateFileData: rootPub,
@ -90,7 +90,7 @@ type tlsServerTestParams struct {
}
func runTestStartStopTLSServer(t *testing.T, tp *tlsServerTestParams) {
assert := assert.New(t)
require := require.New(t)
ff := &shellTesting.FakeFrontend{}
@ -104,16 +104,16 @@ func runTestStartStopTLSServer(t *testing.T, tp *tlsServerTestParams) {
defer s.stop()
err := s.start(serverParams)
assert.Nil(err)
require.Nil(err)
pool, err := trustedCertificateFromFileData(tp.rootPublicCertificateFileData)
assert.Nil(err)
require.Nil(err)
conn, err := grpc.Dial(tp.grpcAddress, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(pool, tp.grpcAddress)))
assert.Nil(err)
require.Nil(err)
tlsCert, err := certificateFromFileData(tp.publicCertificateFileData, tp.privateKeyFileData)
assert.Nil(err)
require.Nil(err)
tlsTransport := &http.Transport{
TLSClientConfig: &tls.Config{
ServerName: tp.proxyAddress,
@ -126,5 +126,5 @@ func runTestStartStopTLSServer(t *testing.T, tp *tlsServerTestParams) {
Timeout: time.Second * 10,
Transport: tlsTransport,
}
runGrpcWithProxyTests(t, assert, s, conn, httpClient, httpsEndpoint)
runGrpcWithProxyTests(t, require, s, conn, httpClient, httpsEndpoint)
}

View File

@ -0,0 +1,465 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statestore
import (
"context"
"strconv"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/pkg/pb"
)
var (
logger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "statestore.redis",
})
)
const (
backfillLastAckTime = "backfill_last_ack_time"
allBackfills = "allBackfills"
)
// CreateBackfill creates a new Backfill in the state storage if one doesn't exist. The xids algorithm used to create the ids ensures that they are unique, with no system-wide synchronization. Calling clients are forbidden from choosing an id during create, so no conflicts will occur.
func (rb *redisBackend) CreateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return status.Errorf(codes.Unavailable, "CreateBackfill, id: %s, failed to connect to redis: %v", backfill.GetId(), err)
}
defer handleConnectionClose(&redisConn)
bf := ipb.BackfillInternal{
Backfill: backfill,
TicketIds: ticketIDs,
}
value, err := proto.Marshal(&bf)
if err != nil {
err = errors.Wrapf(err, "failed to marshal the backfill proto, id: %s", backfill.GetId())
return status.Errorf(codes.Internal, "%v", err)
}
res, err := redisConn.Do("SETNX", backfill.GetId(), value)
if err != nil {
err = errors.Wrapf(err, "failed to set the value for backfill, id: %s", backfill.GetId())
return status.Errorf(codes.Internal, "%v", err)
}
if res.(int64) == 0 {
return status.Errorf(codes.AlreadyExists, "backfill already exists, id: %s", backfill.GetId())
}
return doUpdateAcknowledgmentTimestamp(redisConn, backfill.GetId())
}
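The create-if-absent guard above relies on Redis SETNX, which returns 1 only when the key was newly set; the int64 reply of 0 is what CreateBackfill maps to codes.AlreadyExists. A minimal standalone sketch of that behavior, assuming a local Redis and the redigo client (the key and payload are illustrative):

package main

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// First SETNX creates the key and returns 1; a repeat returns 0,
	// which CreateBackfill reports as codes.AlreadyExists.
	created, _ := redis.Int64(conn.Do("SETNX", "backfill-xyz", "payload"))
	fmt.Println(created) // 1 on the first call, 0 afterwards
}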
// GetBackfill gets the Backfill with the specified id from state storage. This method fails if the Backfill does not exist. Returns the Backfill and associated ticketIDs if they exist.
func (rb *redisBackend) GetBackfill(ctx context.Context, id string) (*pb.Backfill, []string, error) {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return nil, nil, status.Errorf(codes.Unavailable, "GetBackfill, id: %s, failed to connect to redis: %v", id, err)
}
defer handleConnectionClose(&redisConn)
value, err := redis.Bytes(redisConn.Do("GET", id))
if err != nil {
// Return NotFound if redigo did not find the backfill in storage.
if err == redis.ErrNil {
return nil, nil, status.Errorf(codes.NotFound, "Backfill id: %s not found", id)
}
err = errors.Wrapf(err, "failed to get the backfill from state storage, id: %s", id)
return nil, nil, status.Errorf(codes.Internal, "%v", err)
}
if value == nil {
return nil, nil, status.Errorf(codes.NotFound, "Backfill id: %s not found", id)
}
bi := &ipb.BackfillInternal{}
err = proto.Unmarshal(value, bi)
if err != nil {
err = errors.Wrapf(err, "failed to unmarshal internal backfill, id: %s", id)
return nil, nil, status.Errorf(codes.Internal, "%v", err)
}
return bi.Backfill, bi.TicketIds, nil
}
// GetBackfills returns multiple backfills from storage
func (rb *redisBackend) GetBackfills(ctx context.Context, ids []string) ([]*pb.Backfill, error) {
if len(ids) == 0 {
return nil, nil
}
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return nil, status.Errorf(codes.Unavailable, "GetBackfills, failed to connect to redis: %v", err)
}
defer handleConnectionClose(&redisConn)
queryParams := make([]interface{}, len(ids))
for i, id := range ids {
queryParams[i] = id
}
slices, err := redis.ByteSlices(redisConn.Do("MGET", queryParams...))
if err != nil {
err = errors.Wrapf(err, "failed to lookup backfills: %v", ids)
return nil, status.Errorf(codes.Internal, "%v", err)
}
m := make(map[string]*pb.Backfill, len(ids))
for i, s := range slices {
if s != nil {
b := &ipb.BackfillInternal{}
err = proto.Unmarshal(s, b)
if err != nil {
err = errors.Wrapf(err, "failed to unmarshal backfill from redis, key: %s", ids[i])
return nil, status.Errorf(codes.Internal, "%v", err)
}
if b.Backfill != nil {
m[b.Backfill.Id] = b.Backfill
}
}
}
var notFound []string
result := make([]*pb.Backfill, 0, len(ids))
for _, id := range ids {
if b, ok := m[id]; ok {
result = append(result, b)
} else {
notFound = append(notFound, id)
}
}
if len(notFound) > 0 {
redisLogger.Warningf("failed to lookup backfills: %v", notFound)
}
return result, nil
}
// DeleteBackfill removes the Backfill with the specified id from state storage. This method succeeds if the Backfill does not exist.
func (rb *redisBackend) DeleteBackfill(ctx context.Context, id string) error {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return status.Errorf(codes.Unavailable, "DeleteBackfill, id: %s, failed to connect to redis: %v", id, err)
}
defer handleConnectionClose(&redisConn)
_, err = redisConn.Do("DEL", id)
if err != nil {
err = errors.Wrapf(err, "failed to delete the backfill from state storage, id: %s", id)
return status.Errorf(codes.Internal, "%v", err)
}
return rb.deleteExpiredBackfillID(redisConn, id)
}
// UpdateBackfill updates an existing Backfill with new data. ticketIDs can be nil.
func (rb *redisBackend) UpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return status.Errorf(codes.Unavailable, "UpdateBackfill, id: %s, failed to connect to redis: %v", backfill.GetId(), err)
}
defer handleConnectionClose(&redisConn)
expired, err := isBackfillExpired(redisConn, backfill.Id, getBackfillReleaseTimeout(rb.cfg))
if err != nil {
return err
}
if expired {
return status.Errorf(codes.Unavailable, "can not update an expired backfill, id: %s", backfill.Id)
}
bf := ipb.BackfillInternal{
Backfill: backfill,
TicketIds: ticketIDs,
}
value, err := proto.Marshal(&bf)
if err != nil {
err = errors.Wrapf(err, "failed to marshal the backfill proto, id: %s", backfill.GetId())
return status.Errorf(codes.Internal, "%v", err)
}
_, err = redisConn.Do("SET", backfill.GetId(), value)
if err != nil {
err = errors.Wrapf(err, "failed to set the value for backfill, id: %s", backfill.GetId())
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
func isBackfillExpired(conn redis.Conn, id string, ttl time.Duration) (bool, error) {
lastAckTime, err := redis.Float64(conn.Do("ZSCORE", backfillLastAckTime, id))
if err != nil {
return false, status.Errorf(codes.Internal, "%v",
errors.Wrapf(err, "failed to get backfill's last acknowledgement time, id: %s", id))
}
endTime := time.Now().Add(-ttl).UnixNano()
return int64(lastAckTime) < endTime, nil
}
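isBackfillExpired compares the backfill's last-acknowledgement ZSCORE against a cutoff of now minus the TTL, with both sides expressed in Unix nanoseconds. A tiny runnable illustration of that arithmetic (the 500ms/600ms values are made up for the example):

package main

import (
	"fmt"
	"time"
)

func main() {
	ttl := 500 * time.Millisecond
	// A backfill last acknowledged 600ms ago is older than the cutoff.
	lastAck := time.Now().Add(-600 * time.Millisecond).UnixNano()
	cutoff := time.Now().Add(-ttl).UnixNano()
	fmt.Println(lastAck < cutoff) // true: the backfill is expired
}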
// DeleteBackfillCompletely performs a set of operations to remove a backfill and all related entities.
func (rb *redisBackend) DeleteBackfillCompletely(ctx context.Context, id string) error {
m := rb.NewMutex(id)
err := m.Lock(ctx)
if err != nil {
return err
}
defer func() {
if _, err = m.Unlock(ctx); err != nil {
logger.WithError(err).Error("error on mutex unlock")
}
}()
// 1. deindex backfill
err = rb.DeindexBackfill(ctx, id)
if err != nil {
return err
}
// just log errors and try to perform as many actions as possible
// 2. get the ticket ids associated with the current backfill
_, associatedTickets, err := rb.GetBackfill(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"backfill_id": id,
}).Error("DeleteBackfillCompletely - failed to GetBackfill")
}
// 3. delete associated tickets from pending release state
err = rb.DeleteTicketsFromPendingRelease(ctx, associatedTickets)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"backfill_id": id,
}).Error("DeleteBackfillCompletely - failed to DeleteTicketsFromPendingRelease")
}
// 4. delete backfill
err = rb.DeleteBackfill(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"backfill_id": id,
}).Error("DeleteBackfillCompletely - failed to DeleteBackfill")
}
return nil
}
func (rb *redisBackend) cleanupWorker(ctx context.Context, backfillIDsCh <-chan string, wg *sync.WaitGroup) {
var err error
for id := range backfillIDsCh {
err = rb.DeleteBackfillCompletely(ctx, id)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"backfill_id": id,
}).Error("CleanupBackfills")
}
wg.Done()
}
}
// CleanupBackfills removes expired backfills
func (rb *redisBackend) CleanupBackfills(ctx context.Context) error {
expiredBfIDs, err := rb.GetExpiredBackfillIDs(ctx)
if err != nil {
return err
}
var wg sync.WaitGroup
wg.Add(len(expiredBfIDs))
backfillIDsCh := make(chan string, len(expiredBfIDs))
for w := 1; w <= 3; w++ {
go rb.cleanupWorker(ctx, backfillIDsCh, &wg)
}
for _, id := range expiredBfIDs {
backfillIDsCh <- id
}
close(backfillIDsCh)
wg.Wait()
return nil
}
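CleanupBackfills bounds concurrency with three workers draining a buffered channel, counting the WaitGroup up front so Wait returns only after every expired backfill has been handled. The same pattern in isolation (job values are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := []string{"a", "b", "c", "d"}
	var wg sync.WaitGroup
	wg.Add(len(jobs))
	ch := make(chan string, len(jobs))
	// A fixed pool of 3 workers, matching the loop in CleanupBackfills.
	for w := 0; w < 3; w++ {
		go func() {
			for id := range ch {
				fmt.Println("processing", id)
				wg.Done()
			}
		}()
	}
	for _, id := range jobs {
		ch <- id
	}
	close(ch)
	wg.Wait()
}

Because the channel is buffered to the full job count, the producer loop never blocks even if the workers fall behind.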
// UpdateAcknowledgmentTimestamp stores Backfill's last acknowledgement time.
// The check for Backfill existence should be performed on the Frontend side
func (rb *redisBackend) UpdateAcknowledgmentTimestamp(ctx context.Context, id string) error {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return status.Errorf(codes.Unavailable, "UpdateAcknowledgmentTimestamp, id: %s, failed to connect to redis: %v", id, err)
}
defer handleConnectionClose(&redisConn)
expired, err := isBackfillExpired(redisConn, id, getBackfillReleaseTimeout(rb.cfg))
if err != nil {
return err
}
if expired {
return status.Errorf(codes.Unavailable, "can not acknowledge an expired backfill, id: %s", id)
}
return doUpdateAcknowledgmentTimestamp(redisConn, id)
}
func doUpdateAcknowledgmentTimestamp(conn redis.Conn, backfillID string) error {
currentTime := time.Now().UnixNano()
_, err := conn.Do("ZADD", backfillLastAckTime, currentTime, backfillID)
if err != nil {
return status.Errorf(codes.Internal, "%v",
errors.Wrap(err, "failed to store backfill's last acknowledgement time"))
}
return nil
}
// GetExpiredBackfillIDs gets all backfill IDs which are expired
func (rb *redisBackend) GetExpiredBackfillIDs(ctx context.Context) ([]string, error) {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return nil, status.Errorf(codes.Unavailable, "GetExpiredBackfillIDs, failed to connect to redis: %v", err)
}
defer handleConnectionClose(&redisConn)
ttl := getBackfillReleaseTimeout(rb.cfg)
curTime := time.Now()
endTimeInt := curTime.Add(-ttl).UnixNano()
startTimeInt := 0
// Filter out backfill IDs that were fetched but not acknowledged within the TTL (scores are Unix nanoseconds).
expiredBackfillIds, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", backfillLastAckTime, startTimeInt, endTimeInt))
if err != nil {
return nil, status.Errorf(codes.Internal, "error getting expired backfills %v", err)
}
return expiredBackfillIds, nil
}
// deleteExpiredBackfillID deletes expired BackfillID from a sorted set
func (rb *redisBackend) deleteExpiredBackfillID(conn redis.Conn, backfillID string) error {
_, err := conn.Do("ZREM", backfillLastAckTime, backfillID)
if err != nil {
return status.Errorf(codes.Internal, "failed to delete expired backfill ID %s from Sorted Set %s",
backfillID, err.Error())
}
return nil
}
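The expiration bookkeeping above is a plain Redis sorted set: ZADD stores the last-acknowledgement timestamp as the member's score, ZRANGEBYSCORE scans for stale members, and ZREM drops them. A compact sketch of those three calls with redigo, assuming a local Redis (key and member names are illustrative):

package main

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Score the member by timestamp, range-scan the stale ones, then remove.
	conn.Do("ZADD", "last_ack", 123, "stale-backfill")
	stale, _ := redis.Strings(conn.Do("ZRANGEBYSCORE", "last_ack", 0, 1000))
	fmt.Println(stale) // [stale-backfill]
	conn.Do("ZREM", "last_ack", "stale-backfill")
}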
// IndexBackfill adds the backfill to the index.
func (rb *redisBackend) IndexBackfill(ctx context.Context, backfill *pb.Backfill) error {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return status.Errorf(codes.Unavailable, "IndexBackfill, id: %s, failed to connect to redis: %v", backfill.GetId(), err)
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("HSET", allBackfills, backfill.Id, backfill.Generation)
if err != nil {
err = errors.Wrapf(err, "failed to add backfill to all backfills, id: %s", backfill.Id)
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
// DeindexBackfill removes specified Backfill ID from the index. The Backfill continues to exist.
func (rb *redisBackend) DeindexBackfill(ctx context.Context, id string) error {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return status.Errorf(codes.Unavailable, "DeindexBackfill, id: %s, failed to connect to redis: %v", id, err)
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("HDEL", allBackfills, id)
if err != nil {
err = errors.Wrapf(err, "failed to remove ID from backfill index, id: %s", id)
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
// GetIndexedBackfills returns the ids of all backfills currently indexed.
func (rb *redisBackend) GetIndexedBackfills(ctx context.Context) (map[string]int, error) {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
return nil, status.Errorf(codes.Unavailable, "GetIndexedBackfills, failed to connect to redis: %v", err)
}
defer handleConnectionClose(&redisConn)
ttl := getBackfillReleaseTimeout(rb.cfg)
curTime := time.Now()
endTimeInt := curTime.Add(time.Hour).UnixNano()
startTimeInt := curTime.Add(-ttl).UnixNano()
// Exclude expired backfills
acknowledgedIds, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", backfillLastAckTime, startTimeInt, endTimeInt))
if err != nil {
return nil, status.Errorf(codes.Internal, "error getting acknowledged backfills %v", err)
}
index, err := redis.StringMap(redisConn.Do("HGETALL", allBackfills))
if err != nil {
return nil, status.Errorf(codes.Internal, "error getting all indexed backfill ids %v", err)
}
r := make(map[string]int, len(acknowledgedIds))
for _, id := range acknowledgedIds {
if generation, ok := index[id]; ok {
gen, err := strconv.Atoi(generation)
if err != nil {
return nil, status.Errorf(codes.Internal, "error while parsing generation into number: %v", err)
}
r[id] = gen
}
}
return r, nil
}
func getBackfillReleaseTimeout(cfg config.View) time.Duration {
// Use a fraction (80%) of the pendingRelease tickets TTL
ttl := cfg.GetDuration("pendingReleaseTimeout") / 5 * 4
return ttl
}
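The integer form ttl / 5 * 4 yields 80% without any floating-point conversion; for example, a pendingReleaseTimeout of one minute gives a backfill release timeout of 48 seconds, as this snippet shows:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same arithmetic as getBackfillReleaseTimeout: 80% of the ticket TTL.
	pendingReleaseTimeout := time.Minute
	fmt.Println(pendingReleaseTimeout / 5 * 4) // 48s
}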

View File

@ -0,0 +1,803 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statestore
import (
"context"
"fmt"
"strconv"
"testing"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/gomodule/redigo/redis"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
)
func TestCreateBackfillLastAckTime(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
bfID := "1234"
ctx := utilTesting.NewContext(t)
err := service.CreateBackfill(ctx, &pb.Backfill{
Id: bfID,
}, nil)
require.NoError(t, err)
pool := GetRedisPool(cfg)
conn := pool.Get()
// test that the Backfill's last acknowledgement time is present in the sorted set
ts, redisErr := redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, bfID))
require.NoError(t, redisErr)
require.True(t, ts > 0, "timestamp is not valid")
}
func TestCreateBackfill(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
bf := pb.Backfill{
Id: "1",
Generation: 1,
}
var testCases = []struct {
description string
backfill *pb.Backfill
ticketIDs []string
expectedCode codes.Code
expectedMessage string
}{
{
description: "ok, backfill is passed, ticketIDs is nil",
backfill: &bf,
ticketIDs: []string{"1", "2"},
expectedCode: codes.OK,
expectedMessage: "",
},
{
description: "create existing backfill, err expected",
backfill: &bf,
ticketIDs: nil,
expectedCode: codes.AlreadyExists,
expectedMessage: "backfill already exists, id: 1",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
err := service.CreateBackfill(ctx, tc.backfill, tc.ticketIDs)
if tc.expectedCode == codes.OK {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
}
})
}
// pass a cancelled context, err expected
ctx, cancel := context.WithCancel(context.Background())
cancel()
service = New(cfg)
err := service.CreateBackfill(ctx, &pb.Backfill{
Id: "222",
}, nil)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "CreateBackfill, id: 222, failed to connect to redis:")
}
func TestUpdateExistingBackfillNoError(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
// ARRANGE
v := &wrappers.DoubleValue{Value: 123}
a, err := ptypes.MarshalAny(v)
require.NoError(t, err)
existingBF := pb.Backfill{
Id: "123",
Generation: 1,
SearchFields: &pb.SearchFields{
Tags: []string{"123"},
},
Extensions: map[string]*any.Any{
"qwe": a,
},
}
ticketIDs := []string{"1"}
err = service.CreateBackfill(ctx, &existingBF, ticketIDs)
require.NoError(t, err)
updateBF := pb.Backfill{
Id: existingBF.Id,
Generation: 5,
SearchFields: &pb.SearchFields{
Tags: []string{"456"},
},
Extensions: map[string]*any.Any{
"xyz": a,
},
}
updateTicketIDs := []string{"1"}
// ACT
err = service.UpdateBackfill(ctx, &updateBF, updateTicketIDs)
require.NoError(t, err)
// ASSERT
backfillActual, tIDsActual, err := service.GetBackfill(ctx, updateBF.Id)
require.NoError(t, err)
require.Equal(t, updateTicketIDs, tIDsActual)
require.Equal(t, updateBF.Id, backfillActual.Id)
require.Equal(t, updateBF.Generation, backfillActual.Generation)
require.NotNil(t, backfillActual.SearchFields)
require.Equal(t, updateBF.SearchFields.Tags, backfillActual.SearchFields.Tags)
res := &wrappers.DoubleValue{}
err = ptypes.UnmarshalAny(backfillActual.Extensions["xyz"], res)
require.NoError(t, err)
require.Equal(t, v.Value, res.Value)
}
func TestUpdateBackfillDoNotExistCanNotUpdate(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
v := &wrappers.DoubleValue{Value: 123}
a, err := ptypes.MarshalAny(v)
require.NoError(t, err)
updateBF := pb.Backfill{
Id: "123",
Generation: 5,
SearchFields: &pb.SearchFields{
Tags: []string{"456"},
},
Extensions: map[string]*any.Any{
"xyz": a,
},
}
updateTicketIDs := []string{"1"}
err = service.UpdateBackfill(ctx, &updateBF, updateTicketIDs)
require.Error(t, err)
require.Equal(t, codes.Internal.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "failed to get backfill's last acknowledgement time, id: 123")
}
func TestUpdateBackfillExpiredBackfillErrExpected(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
require.NoError(t, err)
bfID := "bf1"
bfLastAck := "backfill_last_ack_time"
bf := pb.Backfill{
Id: bfID,
Generation: 5,
}
// add expired but acknowledged backfill
_, err = rc.Do("ZADD", bfLastAck, 123, bfID)
require.NoError(t, err)
err = service.UpdateBackfill(ctx, &bf, nil)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not update an expired backfill, id: %s", bfID))
}
func TestUpdateBackfillExpiredContextErrExpected(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx, cancel := context.WithCancel(context.Background())
cancel()
service = New(cfg)
err := service.UpdateBackfill(ctx, &pb.Backfill{
Id: "222",
}, nil)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "UpdateBackfill, id: 222, failed to connect to redis:")
}
func TestGetBackfill(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
expectedBackfill := &pb.Backfill{
Id: "mockBackfillID",
Generation: 1,
}
expectedTicketIDs := []string{"1", "2"}
err := service.CreateBackfill(ctx, expectedBackfill, expectedTicketIDs)
require.NoError(t, err)
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
require.NoError(t, err)
_, err = c.Do("SET", "wrong-type-key", "wrong-type-value")
require.NoError(t, err)
var testCases = []struct {
description string
backfillID string
expectedCode codes.Code
expectedMessage string
}{
{
description: "backfill is found",
backfillID: "mockBackfillID",
expectedCode: codes.OK,
expectedMessage: "",
},
{
description: "empty id passed, err expected",
backfillID: "",
expectedCode: codes.NotFound,
expectedMessage: "Backfill id: not found",
},
{
description: "wrong id passed, err expected",
backfillID: "123456",
expectedCode: codes.NotFound,
expectedMessage: "Backfill id: 123456 not found",
},
{
description: "item of a wrong type is requested, err expected",
backfillID: "wrong-type-key",
expectedCode: codes.Internal,
expectedMessage: "failed to unmarshal internal backfill, id: wrong-type-key:",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
backfillActual, tidsActual, errActual := service.GetBackfill(ctx, tc.backfillID)
if tc.expectedCode == codes.OK {
require.NoError(t, errActual)
require.NotNil(t, backfillActual)
require.Equal(t, expectedBackfill.Id, backfillActual.Id)
require.Equal(t, expectedBackfill.SearchFields, backfillActual.SearchFields)
require.Equal(t, expectedBackfill.Extensions, backfillActual.Extensions)
require.Equal(t, expectedBackfill.Generation, backfillActual.Generation)
require.Equal(t, expectedTicketIDs, tidsActual)
} else {
require.Nil(t, backfillActual)
require.Nil(t, tidsActual)
require.Error(t, errActual)
require.Equal(t, tc.expectedCode.String(), status.Convert(errActual).Code().String())
require.Contains(t, status.Convert(errActual).Message(), tc.expectedMessage)
}
})
}
// pass a cancelled context, err expected
ctx, cancel := context.WithCancel(context.Background())
cancel()
service = New(cfg)
bf, tids, err := service.GetBackfill(ctx, "12345")
require.Error(t, err)
require.Nil(t, bf)
require.Nil(t, tids)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "GetBackfill, id: 12345, failed to connect to redis:")
}
func TestDeleteBackfill(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
// The last acknowledgement timestamp is updated by CreateBackfill on the Frontend
bfID := "mockBackfillID"
err := service.CreateBackfill(ctx, &pb.Backfill{
Id: bfID,
Generation: 1,
}, nil)
require.NoError(t, err)
pool := GetRedisPool(cfg)
conn := pool.Get()
ts, err := redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, bfID))
require.NoError(t, err)
require.True(t, ts > 0, "timestamp is not valid")
var testCases = []struct {
description string
backfillID string
expectedCode codes.Code
expectedMessage string
}{
{
description: "backfill is found and deleted",
backfillID: bfID,
expectedCode: codes.OK,
expectedMessage: "",
},
{
description: "empty id passed, no err expected",
backfillID: "",
expectedCode: codes.OK,
expectedMessage: "",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
errActual := service.DeleteBackfill(ctx, tc.backfillID)
require.NoError(t, errActual)
if tc.backfillID != "" {
_, errGetTicket := service.GetTicket(ctx, tc.backfillID)
require.Error(t, errGetTicket)
require.Equal(t, codes.NotFound.String(), status.Convert(errGetTicket).Code().String())
// test that the Backfill was also deleted from the last-acknowledgement sorted set
_, err = redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, tc.backfillID))
require.Error(t, err)
require.Equal(t, err.Error(), "redigo: nil returned")
}
})
}
// pass a cancelled context, err expected
ctx, cancel := context.WithCancel(context.Background())
cancel()
service = New(cfg)
err = service.DeleteBackfill(ctx, "12345")
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "DeleteBackfill, id: 12345, failed to connect to redis:")
}
// TestUpdateAcknowledgmentTimestampLifecycle test statestore functions - UpdateAcknowledgmentTimestamp, GetExpiredBackfillIDs
// and deleteExpiredBackfillID
func TestUpdateAcknowledgmentTimestampLifecycle(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
bf1 := "mockBackfillID"
bf2 := "mockBackfillID2"
err := service.CreateBackfill(ctx, &pb.Backfill{
Id: bf1,
Generation: 1,
}, nil)
require.NoError(t, err)
err = service.CreateBackfill(ctx, &pb.Backfill{
Id: bf2,
Generation: 1,
}, nil)
require.NoError(t, err)
bfIDs, err := service.GetExpiredBackfillIDs(ctx)
require.NoError(t, err)
require.Len(t, bfIDs, 0)
pendingReleaseTimeout := cfg.GetDuration("pendingReleaseTimeout")
// Sleep till all Backfills expire
time.Sleep(pendingReleaseTimeout)
// This call also sets initial LastAcknowledge time
bfIDs, err = service.GetExpiredBackfillIDs(ctx)
require.NoError(t, err)
require.Len(t, bfIDs, 2)
require.Contains(t, bfIDs, bf1)
require.Contains(t, bfIDs, bf2)
err = service.UpdateAcknowledgmentTimestamp(ctx, bf1)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not acknowledge an expired backfill, id: %s", bf1))
err = service.UpdateAcknowledgmentTimestamp(ctx, bf2)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not acknowledge an expired backfill, id: %s", bf2))
err = service.DeleteBackfill(ctx, bfIDs[0])
require.NoError(t, err)
bfIDs, err = service.GetExpiredBackfillIDs(ctx)
require.Len(t, bfIDs, 1)
require.NoError(t, err)
}
func TestUpdateAcknowledgmentTimestamp(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
startTime := time.Now()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
bf1 := "mockBackfillID"
err := service.CreateBackfill(ctx, &pb.Backfill{
Id: bf1,
Generation: 1,
}, nil)
require.NoError(t, err)
err = service.UpdateAcknowledgmentTimestamp(ctx, bf1)
require.NoError(t, err)
// Check that the acknowledgement timestamp is stored correctly in Redis
pool := GetRedisPool(cfg)
conn := pool.Get()
res, err := redis.Int64(conn.Do("ZSCORE", backfillLastAckTime, bf1))
require.NoError(t, err)
// Create a time.Time from Unix nanoseconds and make sure the stored
// timestamp is more recent than the test's start time
t2 := time.Unix(res/1e9, res%1e9)
require.True(t, t2.After(startTime), "UpdateAcknowledgmentTimestamp should update time to a more recent one")
}
func TestUpdateAcknowledgmentTimestamptExpiredBackfillErrExpected(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
require.NoError(t, err)
bfID := "bf1"
bfLastAck := "backfill_last_ack_time"
// add expired but acknowledged backfill
_, err = rc.Do("ZADD", bfLastAck, 123, bfID)
require.NoError(t, err)
err = service.UpdateAcknowledgmentTimestamp(ctx, bfID)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), fmt.Sprintf("can not acknowledge an expired backfill, id: %s", bfID))
}
func TestUpdateAcknowledgmentTimestampConnectionError(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
bf1 := "mockBackfill"
ctx := utilTesting.NewContext(t)
cfg = createInvalidRedisConfig()
service = New(cfg)
require.NotNil(t, service)
err := service.UpdateAcknowledgmentTimestamp(ctx, bf1)
require.Error(t, err, "failed to connect to redis:")
}
func createInvalidRedisConfig() config.View {
cfg := viper.New()
cfg.Set("redis.hostname", "localhost")
cfg.Set("redis.port", 222)
return cfg
}
// TestGetExpiredBackfillIDs test statestore function GetExpiredBackfillIDs
func TestGetExpiredBackfillIDs(t *testing.T) {
// Prepare expired and normal BackfillIds in a Redis Sorted Set
cfg, closer := createRedis(t, false, "")
defer closer()
expID := "expired"
goodID := "fresh"
pool := GetRedisPool(cfg)
conn := pool.Get()
_, err := conn.Do("ZADD", backfillLastAckTime, 123, expID)
require.NoError(t, err)
_, err = conn.Do("ZADD", backfillLastAckTime, time.Now().UnixNano(), goodID)
require.NoError(t, err)
// GetExpiredBackfillIDs should return only expired BF
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
bfIDs, err := service.GetExpiredBackfillIDs(ctx)
require.NoError(t, err)
require.Len(t, bfIDs, 1)
require.Equal(t, expID, bfIDs[0])
}
func TestIndexBackfill(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
t.Run("WithValidContext", func(t *testing.T) {
ctx := utilTesting.NewContext(t)
generateBackfills(ctx, t, service, 2)
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
require.NoError(t, err)
idsIndexed, err := redis.Strings(c.Do("HKEYS", allBackfills))
require.NoError(t, err)
require.Len(t, idsIndexed, 2)
require.Equal(t, "mockBackfillID-0", idsIndexed[0])
require.Equal(t, "mockBackfillID-1", idsIndexed[1])
})
t.Run("WithCancelledContext", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
service = New(cfg)
err := service.IndexBackfill(ctx, &pb.Backfill{
Id: "12345",
Generation: 42,
})
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "IndexBackfill, id: 12345, failed to connect to redis:")
})
}
func TestDeindexBackfill(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
generateBackfills(ctx, t, service, 2)
c, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
require.NoError(t, err)
idsIndexed, err := redis.Strings(c.Do("HKEYS", allBackfills))
require.NoError(t, err)
require.Len(t, idsIndexed, 2)
require.Equal(t, "mockBackfillID-0", idsIndexed[0])
require.Equal(t, "mockBackfillID-1", idsIndexed[1])
// deindex and check that there is only 1 backfill in the returned slice
err = service.DeindexBackfill(ctx, "mockBackfillID-1")
require.NoError(t, err)
idsIndexed, err = redis.Strings(c.Do("HKEYS", allBackfills))
require.NoError(t, err)
require.Len(t, idsIndexed, 1)
require.Equal(t, "mockBackfillID-0", idsIndexed[0])
// pass a cancelled context, err expected
ctx, cancel := context.WithCancel(context.Background())
cancel()
service = New(cfg)
err = service.DeindexBackfill(ctx, "12345")
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "DeindexBackfill, id: 12345, failed to connect to redis:")
}
func TestGetIndexedBackfills(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
verifyBackfills := func(service Service, backfills []*pb.Backfill) {
ids, err := service.GetIndexedBackfills(ctx)
require.NoError(t, err)
require.Equal(t, len(backfills), len(ids))
for _, bf := range backfills {
gen, ok := ids[bf.GetId()]
require.True(t, ok)
require.Equal(t, bf.Generation, int64(gen))
}
}
// no indexed backfills exist
verifyBackfills(service, []*pb.Backfill{})
// two indexed backfills exist
backfills := generateBackfills(ctx, t, service, 2)
verifyBackfills(service, backfills)
// deindex one backfill; one backfill remains
err := service.DeindexBackfill(ctx, backfills[0].Id)
require.NoError(t, err)
verifyBackfills(service, backfills[1:2])
}
func generateBackfills(ctx context.Context, t *testing.T, service Service, amount int) []*pb.Backfill {
backfills := make([]*pb.Backfill, 0, amount)
for i := 0; i < amount; i++ {
tmp := &pb.Backfill{
Id: fmt.Sprintf("mockBackfillID-%d", i),
Generation: 1,
}
require.NoError(t, service.CreateBackfill(ctx, tmp, []string{}))
require.NoError(t, service.IndexBackfill(ctx, tmp))
backfills = append(backfills, tmp)
}
return backfills
}
func BenchmarkCleanupBackfills(b *testing.B) {
t := &testing.T{}
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
require.NoError(t, err)
createStaleBF := func(bfID string, ticketIDs ...string) {
bf := &pb.Backfill{
Id: bfID,
Generation: 1,
}
err = service.CreateBackfill(ctx, bf, ticketIDs)
require.NoError(t, err)
_, err = rc.Do("ZADD", "backfill_last_ack_time", 123, bfID)
require.NoError(t, err)
err = service.AddTicketsToPendingRelease(ctx, ticketIDs)
require.NoError(t, err)
err = service.IndexBackfill(ctx, bf)
require.NoError(t, err)
}
for n := 0; n < b.N; n++ {
for i := 0; i < 50; i++ {
createStaleBF(fmt.Sprintf("b-%d", i), fmt.Sprintf("t1-%d", i), fmt.Sprintf("t1-%d", i+1))
}
err = service.CleanupBackfills(ctx)
require.NoError(t, err)
}
}
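This benchmark runs with the standard Go tooling; the package path below is an assumption inferred from the identifiers above:

go test -run=NONE -bench=BenchmarkCleanupBackfills ./internal/statestore/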
func TestCleanupBackfills(t *testing.T) {
cfg, closer := createRedis(t, false, "")
defer closer()
service := New(cfg)
require.NotNil(t, service)
defer service.Close()
ctx := utilTesting.NewContext(t)
rc, err := redis.Dial("tcp", fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port")))
require.NoError(t, err)
bfID := "mockBackfill-1"
ticketIDs := []string{"t1", "t2"}
bfLastAck := "backfill_last_ack_time"
proposedTicketIDs := "proposed_ticket_ids"
allBackfills := "allBackfills"
generation := int64(55)
bf := &pb.Backfill{
Id: bfID,
Generation: generation,
}
// ARRANGE
err = service.CreateBackfill(ctx, bf, ticketIDs)
require.NoError(t, err)
// add expired but acknowledged backfill
_, err = rc.Do("ZADD", bfLastAck, 123, bfID)
require.NoError(t, err)
err = service.AddTicketsToPendingRelease(ctx, ticketIDs)
require.NoError(t, err)
err = service.IndexBackfill(ctx, bf)
require.NoError(t, err)
// backfill is properly indexed
index, err := redis.StringMap(rc.Do("HGETALL", allBackfills))
require.NoError(t, err)
require.Len(t, index, 1)
require.Equal(t, strconv.Itoa(int(generation)), index[bfID])
// ACT
err = service.CleanupBackfills(ctx)
require.NoError(t, err)
// ASSERT
// backfill must be deindexed
index, err = redis.StringMap(rc.Do("HGETALL", allBackfills))
require.NoError(t, err)
require.Len(t, index, 0)
// backfill doesn't exist anymore
_, _, err = service.GetBackfill(ctx, bfID)
require.Error(t, err)
require.Equal(t, "Backfill id: mockBackfill-1 not found", status.Convert(err).Message())
// no records in backfill sorted set left
expiredBackfillIds, err := redis.Strings(rc.Do("ZRANGEBYSCORE", bfLastAck, 0, 200))
require.NoError(t, err)
require.Empty(t, expiredBackfillIds)
// no records in tickets sorted set left
pendingTickets, err := redis.Strings(rc.Do("ZRANGEBYSCORE", proposedTicketIDs, 0, time.Now().UnixNano()))
require.NoError(t, err)
require.Empty(t, pendingTickets)
}
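For orientation, the flow asserted here can be read as: collect the expired IDs, then remove each backfill completely, which deindexes it and clears its last-ack and pending-ticket records. A sequential sketch against the Service interface shown further below (the real implementation may differ, e.g. by deleting concurrently):

// cleanupExpiredBackfills is an illustrative, sequential rendering of the
// behavior exercised by TestCleanupBackfills; it is not the repo's code.
func cleanupExpiredBackfills(ctx context.Context, s Service) error {
	ids, err := s.GetExpiredBackfillIDs(ctx)
	if err != nil {
		return err
	}
	for _, id := range ids {
		// DeleteBackfillCompletely removes the backfill and all related entities.
		if err := s.DeleteBackfillCompletely(ctx, id); err != nil {
			return err
		}
	}
	return nil
}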

View File

@@ -77,7 +77,7 @@ func (is *instrumentedService) GetIndexedIDSet(ctx context.Context) (map[string]
return is.s.GetIndexedIDSet(ctx)
}
func (is *instrumentedService) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
func (is *instrumentedService) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, []*pb.Ticket, error) {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.UpdateAssignments")
defer span.End()
return is.s.UpdateAssignments(ctx, req)
@@ -89,16 +89,16 @@ func (is *instrumentedService) GetAssignments(ctx context.Context, id string, ca
return is.s.GetAssignments(ctx, id, callback)
}
func (is *instrumentedService) AddTicketsToIgnoreList(ctx context.Context, ids []string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.AddTicketsToIgnoreList")
func (is *instrumentedService) AddTicketsToPendingRelease(ctx context.Context, ids []string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.AddTicketsToPendingRelease")
defer span.End()
return is.s.AddTicketsToIgnoreList(ctx, ids)
return is.s.AddTicketsToPendingRelease(ctx, ids)
}
func (is *instrumentedService) DeleteTicketsFromIgnoreList(ctx context.Context, ids []string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeleteTicketsFromIgnoreList")
func (is *instrumentedService) DeleteTicketsFromPendingRelease(ctx context.Context, ids []string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeleteTicketsFromPendingRelease")
defer span.End()
return is.s.DeleteTicketsFromIgnoreList(ctx, ids)
return is.s.DeleteTicketsFromPendingRelease(ctx, ids)
}
func (is *instrumentedService) ReleaseAllTickets(ctx context.Context) error {
@@ -106,3 +106,94 @@ func (is *instrumentedService) ReleaseAllTickets(ctx context.Context) error {
defer span.End()
return is.s.ReleaseAllTickets(ctx)
}
// CreateBackfill creates a new Backfill in the state storage if one doesn't exist. The xid algorithm used to create the ids ensures that they are unique, with no system-wide synchronization. Calling clients are forbidden from choosing an id during create, so no conflicts will occur.
func (is *instrumentedService) CreateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.CreateBackfill")
defer span.End()
return is.s.CreateBackfill(ctx, backfill, ticketIDs)
}
// GetBackfill gets the Backfill with the specified id from state storage. This method fails if the Backfill does not exist. Returns the Backfill and associated ticketIDs if they exist.
func (is *instrumentedService) GetBackfill(ctx context.Context, id string) (*pb.Backfill, []string, error) {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetBackfill")
defer span.End()
return is.s.GetBackfill(ctx, id)
}
// GetBackfills returns multiple backfills from storage.
func (is *instrumentedService) GetBackfills(ctx context.Context, ids []string) ([]*pb.Backfill, error) {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetBackfills")
defer span.End()
return is.s.GetBackfills(ctx, ids)
}
// DeleteBackfill removes the Backfill with the specified id from state storage. This method succeeds if the Backfill does not exist.
func (is *instrumentedService) DeleteBackfill(ctx context.Context, id string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeleteBackfill")
defer span.End()
return is.s.DeleteBackfill(ctx, id)
}
// UpdateBackfill updates an existing Backfill with new data. ticketIDs can be nil.
func (is *instrumentedService) UpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.UpdateBackfill")
defer span.End()
return is.s.UpdateBackfill(ctx, backfill, ticketIDs)
}
// NewMutex returns a new distributed mutex with the given name
func (is *instrumentedService) NewMutex(key string) RedisLocker {
_, span := trace.StartSpan(context.Background(), "statestore/instrumented.NewMutex")
defer span.End()
return is.s.NewMutex(key)
}
// UpdateAcknowledgmentTimestamp stores the Backfill's last acknowledged time
func (is *instrumentedService) UpdateAcknowledgmentTimestamp(ctx context.Context, id string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.UpdateAcknowledgmentTimestamp")
defer span.End()
return is.s.UpdateAcknowledgmentTimestamp(ctx, id)
}
// GetExpiredBackfillIDs gets the IDs of all backfills that have expired
func (is *instrumentedService) GetExpiredBackfillIDs(ctx context.Context) ([]string, error) {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetExpiredBackfillIDs")
defer span.End()
return is.s.GetExpiredBackfillIDs(ctx)
}
// IndexBackfill adds the backfill to the index.
func (is *instrumentedService) IndexBackfill(ctx context.Context, backfill *pb.Backfill) error {
_, span := trace.StartSpan(ctx, "statestore/instrumented.IndexBackfill")
defer span.End()
return is.s.IndexBackfill(ctx, backfill)
}
// DeindexBackfill removes specified Backfill ID from the index. The Backfill continues to exist.
func (is *instrumentedService) DeindexBackfill(ctx context.Context, id string) error {
_, span := trace.StartSpan(ctx, "statestore/instrumented.DeindexBackfill")
defer span.End()
return is.s.DeindexBackfill(ctx, id)
}
// GetIndexedBackfills returns the ids of all backfills currently indexed.
func (is *instrumentedService) GetIndexedBackfills(ctx context.Context) (map[string]int, error) {
_, span := trace.StartSpan(ctx, "statestore/instrumented.GetIndexedBackfills")
defer span.End()
return is.s.GetIndexedBackfills(ctx)
}
// CleanupBackfills removes expired backfills
func (is *instrumentedService) CleanupBackfills(ctx context.Context) error {
_, span := trace.StartSpan(context.Background(), "statestore/instrumented.CleanupBackfills")
defer span.End()
return is.s.CleanupBackfills(ctx)
}
// DeleteBackfillCompletely performs a set of operations to remove a backfill and all related entities.
func (is *instrumentedService) DeleteBackfillCompletely(ctx context.Context, id string) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeleteBackfillCompletely")
defer span.End()
return is.s.DeleteBackfillCompletely(ctx, id)
}
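Every wrapper above repeats the same decorator shape: start a span named after the call, defer its end, and delegate to the wrapped Service. Factored out purely for illustration (no such helper exists in the diff):

// withSpan shows the pattern each instrumented method follows.
func withSpan(ctx context.Context, name string, f func(context.Context) error) error {
	ctx, span := trace.StartSpan(ctx, name)
	defer span.End()
	return f(ctx)
}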

View File

@@ -27,13 +27,20 @@ type Service interface {
// HealthCheck indicates if the database is reachable.
HealthCheck(ctx context.Context) error
// Closes the connection to the underlying storage.
Close() error
// Ticket
// CreateTicket creates a new Ticket in the state storage. If the id already exists, it will be overwritten.
CreateTicket(ctx context.Context, ticket *pb.Ticket) error
// GetTicket gets the Ticket with the specified id from state storage. This method fails if the Ticket does not exist.
// GetTicket gets the Ticket with the specified id from state storage.
// This method fails if the Ticket does not exist.
GetTicket(ctx context.Context, id string) (*pb.Ticket, error)
// DeleteTicket removes the Ticket with the specified id from state storage. This method succeeds if the Ticket does not exist.
// DeleteTicket removes the Ticket with the specified id from state storage.
// This method succeeds if the Ticket does not exist.
DeleteTicket(ctx context.Context, id string) error
// IndexTicket adds the ticket to the index.
@@ -45,27 +52,71 @@ type Service interface {
// GetIndexedIDSet returns the ids of all tickets currently indexed.
GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error)
// GetTickets returns multiple tickets from storage. Missing tickets are
// silently ignored.
// GetTickets returns multiple tickets from storage.
// Missing tickets are silently ignored.
GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error)
// UpdateAssignments updates the request's specified tickets with assignments.
UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error)
UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, []*pb.Ticket, error)
// GetAssignments returns the assignment associated with the input ticket id
// GetAssignments returns the assignment associated with the input ticket id.
GetAssignments(ctx context.Context, id string, callback func(*pb.Assignment) error) error
// AddProposedTickets appends new proposed tickets to the proposed sorted set with current timestamp
AddTicketsToIgnoreList(ctx context.Context, ids []string) error
// AddTicketsToPendingRelease appends new proposed tickets to the proposed sorted set with current timestamp.
AddTicketsToPendingRelease(ctx context.Context, ids []string) error
// DeleteTicketsFromIgnoreList deletes tickets from the proposed sorted set
DeleteTicketsFromIgnoreList(ctx context.Context, ids []string) error
// DeleteTicketsFromPendingRelease deletes tickets from the proposed sorted set.
DeleteTicketsFromPendingRelease(ctx context.Context, ids []string) error
// ReleaseAllTickets releases all pending tickets back to active
// ReleaseAllTickets releases all pending tickets back to active.
ReleaseAllTickets(ctx context.Context) error
// Closes the connection to the underlying storage.
Close() error
// Backfill
// CreateBackfill creates a new Backfill in the state storage if one doesn't exist.
// The xid algorithm used to create the ids ensures that they are unique, with no system-wide synchronization.
// Calling clients are forbidden from choosing an id during create, so no conflicts will occur.
CreateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error
// GetBackfill gets the Backfill with the specified id from state storage.
// This method fails if the Backfill does not exist.
// Returns the Backfill and associated ticketIDs if they exist.
GetBackfill(ctx context.Context, id string) (*pb.Backfill, []string, error)
// GetBackfills returns multiple backfills from storage
GetBackfills(ctx context.Context, ids []string) ([]*pb.Backfill, error)
// DeleteBackfill removes the Backfill with the specified id from state storage.
// This method succeeds if the Backfill does not exist.
DeleteBackfill(ctx context.Context, id string) error
// DeleteBackfillCompletely performs a set of operations to remove a backfill and all related entities.
DeleteBackfillCompletely(ctx context.Context, id string) error
// UpdateBackfill updates an existing Backfill with new data. ticketIDs can be nil.
UpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIDs []string) error
// NewMutex returns a new distributed mutex with the given name
NewMutex(key string) RedisLocker
// CleanupBackfills removes expired backfills
CleanupBackfills(ctx context.Context) error
// UpdateAcknowledgmentTimestamp updates the Backfill's last acknowledged time
UpdateAcknowledgmentTimestamp(ctx context.Context, id string) error
// GetExpiredBackfillIDs gets all backfill IDs which are expired
GetExpiredBackfillIDs(ctx context.Context) ([]string, error)
// IndexBackfill adds the backfill to the index.
IndexBackfill(ctx context.Context, backfill *pb.Backfill) error
// DeindexBackfill removes specified Backfill ID from the index. The Backfill continues to exist.
DeindexBackfill(ctx context.Context, id string) error
// GetIndexedBackfills returns a map containing the IDs and
// the Generation number of the backfills currently indexed.
GetIndexedBackfills(ctx context.Context) (map[string]int, error)
}
// New creates a Service based on the configuration.
@@ -78,3 +129,9 @@ func New(cfg config.View) Service {
}
return s
}
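For context, constructing the service needs only the Redis keys the tests above already set; since the package is internal to open-match, this compiles only inside the repo. A minimal sketch, with the port value assumed:

// Hypothetical wiring: build a Service from a viper-backed config.
cfg := viper.New()
cfg.Set("redis.hostname", "localhost")
cfg.Set("redis.port", 6379) // assumed locally reachable Redis
service := New(cfg)
defer service.Close()

Pool sizes, timeouts (such as backfillLockTimeout used below), and sentinel settings are read from the same config view.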
// RedisLocker provides methods to use distributed locks against redis
type RedisLocker interface {
Lock(ctx context.Context) error
Unlock(ctx context.Context) (bool, error)
}

View File

@@ -20,30 +20,46 @@ import (
"io/ioutil"
"time"
"github.com/cenkalti/backoff"
"github.com/golang/protobuf/proto"
rs "github.com/go-redsync/redsync/v4"
rsredigo "github.com/go-redsync/redsync/v4/redis/redigo"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/pkg/pb"
)
const allTickets = "allTickets"
var (
redisLogger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "statestore.redis",
})
// this field is used to create new mutexes
redsync *rs.Redsync
)
// NewMutex returns a new distributed mutex with the given name
func (rb *redisBackend) NewMutex(key string) RedisLocker {
m := redsync.NewMutex(fmt.Sprintf("lock/%s", key), rs.WithExpiry(rb.cfg.GetDuration("backfillLockTimeout")))
return redisBackend{mutex: m}
}
// Lock locks the mutex. If it returns an error, the caller may retry acquiring the lock by calling this method again.
func (rb redisBackend) Lock(ctx context.Context) error {
return rb.mutex.LockContext(ctx)
}
// Unlock unlocks the mutex and reports whether the unlock succeeded.
func (rb redisBackend) Unlock(ctx context.Context) (bool, error) {
return rb.mutex.UnlockContext(ctx)
}
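A hedged usage sketch of the locker (the variable names are illustrative; per the comments above, Lock may simply be retried on failure, and the lock auto-expires after backfillLockTimeout):

m := rb.NewMutex(backfillID)
if err := m.Lock(ctx); err != nil {
	return err // the caller may retry Lock
}
defer func() {
	// Unlock reports false when the lock was already lost, e.g. after expiry.
	if ok, err := m.Unlock(ctx); err != nil || !ok {
		redisLogger.WithError(err).Warn("failed to release backfill lock")
	}
}()
// ...read-modify-write the backfill while holding the lock...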
type redisBackend struct {
healthCheckPool *redis.Pool
redisPool *redis.Pool
cfg config.View
mutex *rs.Mutex
}
// Close the connection to the database.
@@ -53,9 +69,11 @@ func (rb *redisBackend) Close() error {
// newRedis creates a statestore.Service backed by Redis database.
func newRedis(cfg config.View) Service {
pool := GetRedisPool(cfg)
redsync = rs.New(rsredigo.NewPool(pool))
return &redisBackend{
healthCheckPool: getHealthCheckPool(cfg),
redisPool: GetRedisPool(cfg),
redisPool: pool,
cfg: cfg,
}
}
@@ -81,7 +99,7 @@ func getHealthCheckPool(cfg config.View) *redis.Pool {
Wait: true,
TestOnBorrow: testOnBorrow,
DialContext: func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
if ctx != nil && ctx.Err() != nil {
return nil, ctx.Err()
}
return redis.DialURL(healthCheckURL, redis.DialConnectTimeout(healthCheckTimeout), redis.DialReadTimeout(healthCheckTimeout))
@@ -99,7 +117,7 @@ func GetRedisPool(cfg config.View) *redis.Pool {
if cfg.IsSet("redis.sentinelHostname") {
sentinelPool := getSentinelPool(cfg)
dialFunc = func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
if ctx != nil && ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -126,7 +144,7 @@ func GetRedisPool(cfg config.View) *redis.Pool {
masterAddr := getMasterAddr(cfg)
masterURL := redisURLFromAddr(masterAddr, cfg, cfg.GetBool("redis.usePassword"))
dialFunc = func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
if ctx != nil && ctx.Err() != nil {
return nil, ctx.Err()
}
return redis.DialURL(masterURL, redis.DialConnectTimeout(idleTimeout), redis.DialReadTimeout(idleTimeout))
@@ -157,7 +175,7 @@ func getSentinelPool(cfg config.View) *redis.Pool {
Wait: true,
TestOnBorrow: testOnBorrow,
DialContext: func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
if ctx != nil && ctx.Err() != nil {
return nil, ctx.Err()
}
redisLogger.WithField("sentinelAddr", sentinelAddr).Debug("Attempting to connect to Redis Sentinel")
@@ -220,452 +238,6 @@ func redisURLFromAddr(addr string, cfg config.View, usePassword bool) string {
return redisURL + addr
}
func (rb *redisBackend) connect(ctx context.Context) (redis.Conn, error) {
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"error": err.Error(),
}).Error("failed to connect to redis")
return nil, status.Errorf(codes.Unavailable, "%v", err)
}
return redisConn, nil
}
// CreateTicket creates a new Ticket in the state storage. If the id already exists, it will be overwritten.
func (rb *redisBackend) CreateTicket(ctx context.Context, ticket *pb.Ticket) error {
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
value, err := proto.Marshal(ticket)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"key": ticket.GetId(),
"error": err.Error(),
}).Error("failed to marshal the ticket proto")
return status.Errorf(codes.Internal, "%v", err)
}
_, err = redisConn.Do("SET", ticket.GetId(), value)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "SET",
"key": ticket.GetId(),
"error": err.Error(),
}).Error("failed to set the value for ticket")
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
// GetTicket gets the Ticket with the specified id from state storage. This method fails if the Ticket does not exist.
func (rb *redisBackend) GetTicket(ctx context.Context, id string) (*pb.Ticket, error) {
redisConn, err := rb.connect(ctx)
if err != nil {
return nil, err
}
defer handleConnectionClose(&redisConn)
value, err := redis.Bytes(redisConn.Do("GET", id))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "GET",
"key": id,
"error": err.Error(),
}).Error("failed to get the ticket from state storage")
// Return NotFound if redigo did not find the ticket in storage.
if err == redis.ErrNil {
msg := fmt.Sprintf("Ticket id:%s not found", id)
redisLogger.WithFields(logrus.Fields{
"key": id,
"cmd": "GET",
}).Error(msg)
return nil, status.Error(codes.NotFound, msg)
}
return nil, status.Errorf(codes.Internal, "%v", err)
}
if value == nil {
msg := fmt.Sprintf("Ticket id:%s not found", id)
redisLogger.WithFields(logrus.Fields{
"key": id,
"cmd": "GET",
}).Error(msg)
return nil, status.Error(codes.NotFound, msg)
}
ticket := &pb.Ticket{}
err = proto.Unmarshal(value, ticket)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"key": id,
"error": err.Error(),
}).Error("failed to unmarshal the ticket proto")
return nil, status.Errorf(codes.Internal, "%v", err)
}
return ticket, nil
}
// DeleteTicket removes the Ticket with the specified id from state storage.
func (rb *redisBackend) DeleteTicket(ctx context.Context, id string) error {
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
_, err = redisConn.Do("DEL", id)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "DEL",
"key": id,
"error": err.Error(),
}).Error("failed to delete the ticket from state storage")
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
// IndexTicket indexes the Ticket id for the configured index fields.
func (rb *redisBackend) IndexTicket(ctx context.Context, ticket *pb.Ticket) error {
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("SADD", allTickets, ticket.Id)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "SADD",
"ticket": ticket.GetId(),
"error": err.Error(),
"key": allTickets,
}).Error("failed to add ticket to all tickets")
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
// DeindexTicket removes the indexing for the specified Ticket. Only the indexes are removed but the Ticket continues to exist.
func (rb *redisBackend) DeindexTicket(ctx context.Context, id string) error {
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("SREM", allTickets, id)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "SREM",
"key": allTickets,
"id": id,
"error": err.Error(),
}).Error("failed to remove ticket from all tickets")
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
// GetIndexedIDSet returns the ids of all tickets currently indexed.
func (rb *redisBackend) GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error) {
redisConn, err := rb.connect(ctx)
if err != nil {
return nil, err
}
defer handleConnectionClose(&redisConn)
ttl := rb.cfg.GetDuration("pendingReleaseTimeout")
curTime := time.Now()
endTimeInt := curTime.Add(time.Hour).UnixNano()
startTimeInt := curTime.Add(-ttl).UnixNano()
// Filter out tickets that were fetched but not assigned within the TTL window.
idsInIgnoreLists, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", "proposed_ticket_ids", startTimeInt, endTimeInt))
if err != nil {
redisLogger.WithError(err).Error("failed to get proposed tickets")
return nil, status.Errorf(codes.Internal, "error getting ignore list %v", err)
}
idsIndexed, err := redis.Strings(redisConn.Do("SMEMBERS", allTickets))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"Command": "SMEMBER allTickets",
}).WithError(err).Error("Failed to lookup all tickets.")
return nil, status.Errorf(codes.Internal, "error getting all indexed ticket ids %v", err)
}
r := make(map[string]struct{}, len(idsIndexed))
for _, id := range idsIndexed {
r[id] = struct{}{}
}
for _, id := range idsInIgnoreLists {
delete(r, id)
}
return r, nil
}
// GetTickets returns multiple tickets from storage. Missing tickets are
// silently ignored.
func (rb *redisBackend) GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error) {
if len(ids) == 0 {
return nil, nil
}
redisConn, err := rb.connect(ctx)
if err != nil {
return nil, err
}
defer handleConnectionClose(&redisConn)
queryParams := make([]interface{}, len(ids))
for i, id := range ids {
queryParams[i] = id
}
ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", queryParams...))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"Command": fmt.Sprintf("MGET %v", ids),
}).WithError(err).Error("Failed to lookup tickets.")
return nil, status.Errorf(codes.Internal, "%v", err)
}
r := make([]*pb.Ticket, 0, len(ids))
for i, b := range ticketBytes {
// Tickets may be deleted by the time we read them from redis.
if b != nil {
t := &pb.Ticket{}
err = proto.Unmarshal(b, t)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"key": ids[i],
}).WithError(err).Error("Failed to unmarshal ticket from redis.")
return nil, status.Errorf(codes.Internal, "%v", err)
}
r = append(r, t)
}
}
return r, nil
}
// UpdateAssignments updates the request's specified tickets with assignments.
func (rb *redisBackend) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
resp := &pb.AssignTicketsResponse{}
if len(req.Assignments) == 0 {
return resp, nil
}
idToA := make(map[string]*pb.Assignment)
ids := make([]string, 0)
idsI := make([]interface{}, 0)
for _, a := range req.Assignments {
if a.Assignment == nil {
return nil, status.Error(codes.InvalidArgument, "AssignmentGroup.Assignment is required")
}
for _, id := range a.TicketIds {
if _, ok := idToA[id]; ok {
return nil, status.Errorf(codes.InvalidArgument, "Ticket id %s is assigned multiple times in one assign tickets call.", id)
}
idToA[id] = a.Assignment
ids = append(ids, id)
idsI = append(idsI, id)
}
}
redisConn, err := rb.connect(ctx)
if err != nil {
return nil, err
}
defer handleConnectionClose(&redisConn)
ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", idsI...))
if err != nil {
return nil, err
}
tickets := make([]*pb.Ticket, 0, len(ticketBytes))
for i, ticketByte := range ticketBytes {
// Tickets may be deleted by the time we read them from redis.
if ticketByte == nil {
resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
TicketId: ids[i],
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
})
} else {
t := &pb.Ticket{}
err = proto.Unmarshal(ticketByte, t)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"key": ids[i],
}).WithError(err).Error("failed to unmarshal ticket from redis.")
return nil, status.Errorf(codes.Internal, "%v", err)
}
tickets = append(tickets, t)
}
}
assignmentTimeout := rb.cfg.GetDuration("assignedDeleteTimeout") / time.Millisecond
err = redisConn.Send("MULTI")
if err != nil {
return nil, errors.Wrap(err, "error starting redis multi")
}
for _, ticket := range tickets {
ticket.Assignment = idToA[ticket.Id]
var ticketByte []byte
ticketByte, err = proto.Marshal(ticket)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to marshal ticket %s", ticket.GetId())
}
err = redisConn.Send("SET", ticket.Id, ticketByte, "PX", int64(assignmentTimeout), "XX")
if err != nil {
return nil, errors.Wrap(err, "error sending ticket assignment set")
}
}
wasSet, err := redis.Values(redisConn.Do("EXEC"))
if err != nil {
return nil, errors.Wrap(err, "error executing assignment set")
}
if len(wasSet) != len(tickets) {
return nil, status.Errorf(codes.Internal, "sent %d tickets to redis, but received %d back", len(tickets), len(wasSet))
}
for i, ticket := range tickets {
v, err := redis.String(wasSet[i], nil)
if err == redis.ErrNil {
resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
TicketId: ticket.Id,
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
})
continue
}
if err != nil {
return nil, errors.Wrap(err, "unexpected error from redis multi set")
}
if v != "OK" {
return nil, status.Errorf(codes.Internal, "unexpected response from redis: %s", v)
}
}
return resp, nil
}
// GetAssignments returns the assignment associated with the input ticket id
func (rb *redisBackend) GetAssignments(ctx context.Context, id string, callback func(*pb.Assignment) error) error {
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
backoffOperation := func() error {
var ticket *pb.Ticket
ticket, err = rb.GetTicket(ctx, id)
if err != nil {
redisLogger.WithError(err).Errorf("failed to get ticket %s when executing get assignments", id)
return backoff.Permanent(err)
}
err = callback(ticket.GetAssignment())
if err != nil {
return backoff.Permanent(err)
}
return status.Error(codes.Unavailable, "listening on assignment updates, waiting for the next backoff")
}
err = backoff.Retry(backoffOperation, rb.newConstantBackoffStrategy())
if err != nil {
return err
}
return nil
}
// AddProposedTickets appends new proposed tickets to the proposed sorted set with current timestamp
func (rb *redisBackend) AddTicketsToIgnoreList(ctx context.Context, ids []string) error {
if len(ids) == 0 {
return nil
}
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
currentTime := time.Now().UnixNano()
cmds := make([]interface{}, 0, 2*len(ids)+1)
cmds = append(cmds, "proposed_ticket_ids")
for _, id := range ids {
cmds = append(cmds, currentTime, id)
}
_, err = redisConn.Do("ZADD", cmds...)
if err != nil {
redisLogger.WithError(err).Error("failed to append proposed tickets to ignore list")
return status.Error(codes.Internal, err.Error())
}
return nil
}
// DeleteTicketsFromIgnoreList deletes tickets from the proposed sorted set
func (rb *redisBackend) DeleteTicketsFromIgnoreList(ctx context.Context, ids []string) error {
if len(ids) == 0 {
return nil
}
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
cmds := make([]interface{}, 0, len(ids)+1)
cmds = append(cmds, "proposed_ticket_ids")
for _, id := range ids {
cmds = append(cmds, id)
}
_, err = redisConn.Do("ZREM", cmds...)
if err != nil {
redisLogger.WithError(err).Error("failed to delete proposed tickets from ignore list")
return status.Error(codes.Internal, err.Error())
}
return nil
}
func (rb *redisBackend) ReleaseAllTickets(ctx context.Context) error {
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
_, err = redisConn.Do("DEL", "proposed_ticket_ids")
return err
}
func handleConnectionClose(conn *redis.Conn) {
err := (*conn).Close()
if err != nil {
@@ -674,20 +246,3 @@ func handleConnectionClose(conn *redis.Conn) {
}).Debug("failed to close redis client connection.")
}
}
func (rb *redisBackend) newConstantBackoffStrategy() backoff.BackOff {
backoffStrat := backoff.NewConstantBackOff(rb.cfg.GetDuration("backoff.initialInterval"))
return backoff.BackOff(backoffStrat)
}
// TODO: cache the backoff object
// nolint: unused
func (rb *redisBackend) newExponentialBackoffStrategy() backoff.BackOff {
backoffStrat := backoff.NewExponentialBackOff()
backoffStrat.InitialInterval = rb.cfg.GetDuration("backoff.initialInterval")
backoffStrat.RandomizationFactor = rb.cfg.GetFloat64("backoff.randFactor")
backoffStrat.Multiplier = rb.cfg.GetFloat64("backoff.multiplier")
backoffStrat.MaxInterval = rb.cfg.GetDuration("backoff.maxInterval")
backoffStrat.MaxElapsedTime = rb.cfg.GetDuration("backoff.maxElapsedTime")
return backoff.BackOff(backoffStrat)
}

Some files were not shown because too many files have changed in this diff.