Compare commits


116 Commits

Author SHA1 Message Date
d781be1a3c Bump golang.org/x/net to v0.25.0 () 2024-07-10 11:11:56 +09:00
ea02b531ad Bump google.golang.org/protobuf to v1.34.1 () 2024-06-25 16:34:19 +09:00
02aa4781ae Removal of myself from CODEOWNERS () 2024-06-10 08:14:12 +00:00
493b702723 Fix 404 link for stackdriver () 2024-06-06 09:36:29 +09:00
4eb2ff5e62 fix metrics for backfills per query () 2024-02-28 16:18:54 +09:00
879e2efcc4 Security upgrade golang.org/x/crypto from 0.14.0 to 0.17.0 () 2024-01-29 13:18:23 +09:00
3b3d49b1fe Updated go 1.21.0 to 1.21.5 ()
* Updated go 1.21 to 1.21.5

* Fixed golangci

* Reverted GOLANGCI
2023-12-13 18:02:18 +09:00
4dca40d85f Updated to 1.8.1 ()
Changed release semantic versions: 1.8.1
2023-12-12 10:03:37 +09:00
c58996da62 Update google.golang.org/grpc from 1.57.0 to 1.57.1 () 2023-11-07 09:08:18 +09:00
8474eca3cc Bumps golang.org/x/net from 0.7.0 to 0.17.0 () 2023-11-06 18:34:51 +09:00
2ac12b1c83 Update CODEOWNERS ()
* Update CODEOWNERS

* Add @ashutosji to CODEOWNERS
2023-11-01 10:42:38 +09:00
dbda6c8dc1 Merge Release 1.8 into Main ()
* Release 1.8-rc1 ()

* Release 1.8 ()
2023-10-21 11:05:07 +09:00
98e7a02ebf Go deps version upgrade ()
* go deps version upgrade

* make presubmit changes
2023-03-02 16:51:28 +09:00
3d1bae2021 removing deprecated psp () 2023-02-24 16:37:33 +05:30
7070f056df Bump github.com/prometheus/client_golang from 1.8.0 to 1.11.1 ()
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.8.0 to 1.11.1.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.8.0...v1.11.1)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-02-15 19:39:39 +05:30
4c2544f043 Update k8s.io packages ()
* update supported version of k8s.io/client-go

* update tutorial deps

* add context
2023-02-11 11:50:07 +05:30
2e6aa4f36f adding Mark and Joseph () 2023-02-09 20:31:37 +05:30
50b4063bee add Content-Type and Transfer-Encoding to matchfunction:run POST request () 2023-01-27 15:06:03 +05:30
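For context on the commit above: a minimal Go sketch of setting those two headers on the POST request. The URL, payload handling, and function name are illustrative assumptions, not the repository's actual client code.

```go
package client

import (
	"bytes"
	"net/http"
)

// callMatchFunctionRun posts a payload to an MMF "run" HTTP endpoint with
// the Content-Type and Transfer-Encoding headers set explicitly rather
// than relying on defaults.
func callMatchFunctionRun(url string, payload []byte) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.TransferEncoding = []string{"chunked"}
	return http.DefaultClient.Do(req)
}
```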
31a4a45d73 Bump github.com/gogo/protobuf from 1.3.1 to 1.3.2 ()
Bumps [github.com/gogo/protobuf](https://github.com/gogo/protobuf) from 1.3.1 to 1.3.2.
- [Release notes](https://github.com/gogo/protobuf/releases)
- [Commits](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2)

---
updated-dependencies:
- dependency-name: github.com/gogo/protobuf
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-01-17 15:19:36 -05:00
67be35006c update to google.golang.org/protobuf and deprecation changes ()
* update to google.golang.org/protobuf and deprecation changes

* update go version to 1.17

* updating deprecated grpc.WithInsecure() to insecure.NewCredentials()

* update go to 1.18 to solve dependency issue

* lint disable typecheck

* adding create ticket test case condition

* limiting number of parallel tests

* tutorials dependency update

* update grpc protobuf files

* update go version to 1.19.3

* update go version to 1.19 in tutorials

* added command in make file to update tutorial-deps

* update deps

* update tutorial-deps

* add gke-gcloud-auth-plugin

* command make tutorial-deps

* fix tutorial-deps

* fix go.sum file

* fix deps
2023-01-17 13:58:56 -05:00
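One bullet in the commit above swaps the deprecated `grpc.WithInsecure()` dial option for explicit insecure transport credentials. A minimal sketch of that migration, assuming a plain client dial:

```go
package client

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dial connects without TLS. The old, deprecated form was:
//   grpc.Dial(addr, grpc.WithInsecure())
// The replacement passes explicit insecure transport credentials.
func dial(addr string) (*grpc.ClientConn, error) {
	return grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
```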
d0ce1b317f update helm chart repo link () 2023-01-05 12:43:20 -05:00
93cd5c7a9f Added Redis Enterprise Deploy Instructions () 2022-12-16 16:55:45 -05:00
3193921816 Update CODEOWNERS ()
Add Andrew Grundy as CODEOWNER

Co-authored-by: Mridul Goswami <mridulgoswami@google.com>
2022-12-14 15:33:39 +05:30
7a3bb82089 Add Redis Enterprise tutorial for Open Match ()
* Add tutorial for Open Match coupled with Redis Enterprise for data-layer

* Fixed namespace and removed duplicate command

* update formatting and additional step to wipe the cluster of a previous open match core install

* Update README.md

removed TODO

* make changes to solution/matchfunction/matchfunction.yaml for the correct namespace
2022-12-13 14:10:44 -05:00
a4eb6d6cbd add logging level configuration ()
Allow users to set the logging level to something other than the hard-coded debug

Co-authored-by: Mridul Goswami <mridulgoswami@google.com>
2022-12-12 19:34:18 +05:30
927a976a10 shifted e2e tests to project root () 2022-12-05 10:13:38 -05:00
33efc848ff Add open-match-override setting ()
* Add open-match-override setting

* Added enabled

Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
Co-authored-by: Mridul Goswami <mridulgoswami@google.com>
2022-12-05 13:58:29 +00:00
04c019c6cb Default values of configs ()
* setting validation and default values of configs

* config check in internal/config package
2022-12-02 12:10:42 -05:00
1e51ad859c specify hpa for individual service () 2022-11-01 10:29:46 -04:00
fdd8783a34 Ticket metrics panels ()
* change in calculation of active tickets

* grafana panels for new ticket metrics

* updated create cluster and proxy commands
2022-10-04 14:09:17 -04:00
036be6455d Added metrics for ticket behavior () ()
* Added metrics for total number of tickets and total number of backfills (,  of proposed metrics)

* Fixed totalBackfillTicketsView Name

* Added metric for keeping track of tickets in pending state

* altered name of total tickets to total 'active' tickets to remove confusion

* updated pending tickets metric name

* Register totalActiveTicketsView and pendingTotalTicketsView
2022-09-21 12:38:47 -04:00
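Context for the final bullet above: Open Match's metrics are OpenCensus-based, and OpenCensus drops recorded data for views that were never registered. A sketch of the pattern, with illustrative measure and view names (not necessarily the exact ones merged):

```go
package metrics

import (
	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var (
	// Illustrative measure name, not necessarily the one merged.
	totalActiveTickets = stats.Int64("open_match/total_active_tickets",
		"Total number of active tickets", stats.UnitDimensionless)

	totalActiveTicketsView = &view.View{
		Name:        "open_match/total_active_tickets",
		Measure:     totalActiveTickets,
		Description: "Total number of active tickets",
		Aggregation: view.LastValue(),
	}
)

func init() {
	// Without registration, stats.Record calls for the measure are dropped.
	if err := view.Register(totalActiveTicketsView); err != nil {
		panic(err)
	}
}
```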
5d5f4de7a7 lower GOLANG_TEST_COUNT to 3 which allows test to pass locally. Patch fix for now () 2022-08-26 13:39:17 +05:30
a9f985d217 Add custom annotations to Service Account ()
Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
2022-08-23 14:01:11 -04:00
6598a55e74 Added persistent field to store any config/metadata in ticket and backfill ()
* update persistent field when updating whole backfill

* Added persistent field in ticket and backfill
2022-08-18 01:27:34 -04:00
4d6da1632a Updated github.com/gogo/protobuf due to security vulnerability () 2022-08-02 13:41:53 -04:00
40a06447d0 Fix typo ()
Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
Co-authored-by: Mridul Goswami <mridulgoswami@google.com>
2022-07-18 22:48:47 +05:30
a9d122f50c Update WatchAssignment function ()
* removed for loop from WatchAssignment function and shifted ctx.Done to the callback function

* update GKE version to the regular supported version
2022-07-18 11:55:38 -04:00
73ec73f2e8 add mridulji as codeowner () 2022-06-27 20:55:21 +05:30
361f8ff3db Added step to release template to update tutorial references for current version () 2022-06-22 12:12:44 -04:00
8297cac2b8 Set default value of assignedDeleteTimeout ()
Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
2022-06-21 12:44:42 -04:00
120a114647 Using uuid instead of time value to make a unique matchId. ()
* use uuid for matchId instead of time value because matchfunction seems to be called concurrently, so I got 'multiple match functions used same match_id:' errors.

* use uuid for matchId instead of time value because matchfunction seems to be called concurrently, so I got 'multiple match functions used same match_id:' errors.

* Revert "use uuid for matchId instead of time value because matchfunction seems to be called concurrently, so I got 'multiple match functions used same match_id:' errors."

This reverts commit 99b4e92ab9f1bc44feae3475702e769c83320f5a.

* use uuid for matchId instead of time value because matchfunction seems to be called concurrently, so I got 'multiple match functions used same match_id:' errors.

Co-authored-by: Mridul Goswami <mridulgoswami@google.com>
Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
2022-06-21 11:13:30 -04:00
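A minimal sketch of the fix described above, assuming the github.com/google/uuid package; the ID format and function name are illustrative:

```go
package mmf

import (
	"fmt"

	"github.com/google/uuid"
)

// newMatchID stays unique even when the match function runs concurrently.
// A timestamp-based ID can collide across concurrent runs and trigger
// "multiple match functions used same match_id" errors; a UUID does not.
func newMatchID(profileName string) string {
	return fmt.Sprintf("profile-%s-match-%s", profileName, uuid.New())
}
```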
7af54ee1bc update telemetry helm chart versions and gke version ()
* update telemetry helm chart versions and gke version

* split configmaps for grafana dashboards

* splitting grafana dashboard configMaps by reading filenames

* renaming the grafana dashboard label for conversion into a string

Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
2022-06-21 10:33:09 -04:00
68cecb91e5 Adjust Helm README spaces for portType ClusterIP ()
Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
2022-06-07 15:47:07 -04:00
67dc60dba8 Update bugreport.md ()
Remove unnecessary colon
2022-06-07 13:37:03 -04:00
09d1ff7171 fixing 404 response parser () 2022-06-07 11:32:30 -04:00
b6e5114715 Redis helm chart version change to 16.3.1 ()
* upgrade helm version

* upgrade redis chart version

* required changes for latest redis chart version

Co-authored-by: Jon Foust <38893532+syntxerror@users.noreply.github.com>
2022-03-03 11:38:31 -05:00
23d2fd5042 Update CODEOWNERS 2022-03-03 10:06:51 -05:00
2b73d52e0c AssignTickets empty check and test cases added () 2022-02-07 09:52:16 -05:00
47c34587dc docker build optimization by using mount cache for go dependencies () 2022-01-25 19:31:45 -05:00
76937b6350 Redis default values update ()
I have set resource requests for each component of redis and set 'slaveCount' to 3 (this actually sets the total number of pods, and a minimum of 3 is required for a robust Redis Sentinel deployment; see: https://redis.io/topics/sentinel#fundamental-things-to-know-about-sentinel-before-deploying)
2021-11-29 11:06:48 -05:00
2e03c1a197 fix outdated apiVersion ()
Recent k8s API versions removed Deployment from extensions/v1beta1; it now lives in apps/v1
2021-09-22 13:05:31 -04:00
eca40e3298 re-enable workload identity () 2021-08-24 00:21:56 -04:00
902c9d69b4 Update development.md ()
update to new main branch naming convention.
2021-08-23 21:27:16 -04:00
67767cf1cd updated default gke version. updated grpc version in go.mod files () 2021-07-28 16:29:42 -04:00
6f46731b15 Respond to AcknowledgeBackfill with the tickets that were assigned ()
Fixes 
2021-06-09 13:15:28 -04:00
0d1a77c5de add andrewgrundy as codeowner () 2021-04-29 20:36:12 -04:00
f2a23f5ba1 add mode to profile name for range of game modes () 2021-04-16 17:01:24 -04:00
3fa588c1f8 Add backfill scenario to scale tests ()
* Implement backfill querying

* Update location for stable and incubator charts

* Add MMF backfill example

* Simplify MMF backfill example

* Add backfill scenario to scale tests

* Update backfill scenario

* Improve backfill scenario

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2021-04-13 17:17:52 -04:00
cc08f39205 Sentinel fix (2) ()
* update master

* fixing config.json

* add override for sentinel.usePassword

* Update go.sum

removed leftover from conflict
2021-04-07 14:35:50 -04:00
ec9cf00bcf Revert "Sentinel fix ()" ()
This reverts commit 8b8617f68d5aec70b1016912d07acfe31e3d12ab.
2021-04-05 13:03:23 -04:00
8b8617f68d Sentinel fix ()
* update master

* override sentinel.usePassword to false

Co-authored-by: jonfoust <38893532+jonfoust@users.noreply.github.com>
2021-04-03 03:22:03 -04:00
ce9b989e58 Update to gRPC Gateway v2 () 2021-03-22 12:20:34 -04:00
5c00395c78 Updating jonfoust username to syntxerror as a code reviewer ()
Co-authored-by: jonfoust <38893532+jonfoust@users.noreply.github.com>
2021-03-19 15:26:22 -07:00
faf3eded1f Return 404 when deleting ticket/backfill ticket that does not exist () 2021-02-16 15:36:23 -05:00
250d44aefd Fix WatchAssignments causes memory leaks () 2021-01-29 16:02:23 -08:00
13fdf5960f Make tests output readable () 2021-01-27 16:33:31 +03:00
aa5a1f9da1 Fix minor typos () 2021-01-22 01:49:07 +03:00
ad1ca16218 Add string err comparisons to backfill e2e ()
Make failure output more readable.
2021-01-20 17:00:56 +03:00
7d849f3f04 Backfill: Skip not found errors on Backend ()
Backfill: Skip not found errors on Backend
There can be a case where a backfill returned by the MMF was already deleted
in CleanupBackfills.
Add a UT to check that the error is skipped
2021-01-20 01:30:01 +03:00
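A minimal sketch of the behavior this commit describes, assuming gRPC status codes and a hypothetical delete function standing in for the statestore call:

```go
package synchronizer

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// deleteIgnoringNotFound treats NotFound as success: a backfill returned by
// the MMF may already have been removed by CleanupBackfills, and that race
// should not fail the whole cycle.
func deleteIgnoringNotFound(ctx context.Context, del func(context.Context, string) error, id string) error {
	if err := del(ctx, id); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}
```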
05c8c8aa76 Fix leftover after 1080 PR () 2021-01-19 12:48:15 -08:00
f50c9eec80 Minor fixing some typos () 2021-01-19 11:26:17 -08:00
c6f23f01ca Improve proto comments () 2021-01-19 11:05:21 -08:00
21efdb6691 Move Cleanup Backfills after main SynchronizerCycle & add workers pool ()
Use workers in the cleanup process. Move backfill cleanup to the end of the sync cycle.
TestCleanUpExpiredBackfills calls FetchMatches twice.
2021-01-19 10:43:08 +03:00
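A minimal sketch of the workers-pool idea from the commit above; the pool size, channel, and function names are illustrative assumptions:

```go
package synchronizer

import "sync"

// cleanupWithWorkers deletes expired backfills across a small fixed pool,
// rather than sequentially or with one goroutine per backfill.
func cleanupWithWorkers(ids []string, workers int, del func(string)) {
	ch := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range ch {
				del(id)
			}
		}()
	}
	for _, id := range ids {
		ch <- id
	}
	close(ch)
	wg.Wait()
}
```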
81a1dc38b6 add fix in helm chart to use custom redis instance ()
Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2021-01-18 16:52:22 +03:00
d0ddf22658 Expired backfills can not be updated or acknowledged ()
* do not acknowledge expired backfills

* use NoError

* parse ZSCORE response to float, not to int

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2021-01-18 15:48:54 +03:00
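On the ZSCORE bullet above: Redis replies with a string-encoded float, so decoding to an int breaks for fractional scores. A minimal sketch assuming the gomodule/redigo client, with an illustrative key and function name:

```go
package statestore

import "github.com/gomodule/redigo/redis"

// lastAcknowledged reads a backfill's last-acknowledged timestamp from a
// sorted set. ZSCORE returns the score as a string-encoded float, so decode
// it with redis.Float64 rather than an integer conversion.
func lastAcknowledged(conn redis.Conn, key, backfillID string) (float64, error) {
	return redis.Float64(conn.Do("ZSCORE", key, backfillID))
}
```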
ee247c6c1a Updated release steps. Added additional step to publish release notes to OM Blog () 2021-01-15 17:26:56 -05:00
a17eb3bc72 Fix proto comments for better markdown output () 2021-01-15 11:26:14 -08:00
3d194f541e Add help comments in Makefile ()
* Add help comments in Makefile

* Delete utilities subtitle

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>

* Reorder subtitle definition

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2021-01-14 16:43:51 +03:00
3a0cd7611b Move the Redis chart to bitnami as update to 12.3.3 () 2021-01-12 11:58:52 -08:00
c13b461795 Make redis lock expiration configurable () 2021-01-07 13:29:27 -08:00
b9e55fc727 Add pod tolerations, nodeSelector and affinity in helm for subcharts ()
Fix 

Co-authored-by: Scott Redig <sredig@google.com>
2021-01-07 13:05:30 -08:00
dd1386a55b Clean up expired backfills ()
Add a `CleanupBackfills()` call to the synchronizer.
Move the delete-backfill logic into statestore.
Add a mutex to DeleteBackfillCompletely and update the deleteBackfill test.
Remove the goroutine.
* use a new context in CleanupBackfills().
* move cleanup to the start of the Synchronizer sync cycle.
Co-authored-by: Alexander <alexander.apalikov@globant.com>
2020-12-30 15:41:33 +03:00
defac9065b Frontend acknowledge backfill ()
* Frontend: Add AcknowledgeBackfill method
Update Tickets associated with backfill, remove all assigned

Add a mutex lock and call UpdateBackfill accordingly after the UpdateAssignments call.
The new function name, doUpdateAcknowledgmentTimestamp, seems more reasonable
as it does only Redis timestamp updates.

* Add Generation autoincrement test in test helper func
Add more logic as in doAssign() function
Deindex tickets and add error logging for all NotFound
tickets.
2020-12-28 16:37:29 +03:00
f203384fbf Add comments to MMF backfill example ()
Add comments to MatchMaking Function with Backfill example.
It creates matches with Backfills first, then full 1v1 matches, and if one player is left over, it creates a match with a new Backfill in it.

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-22 22:14:53 +03:00
7ef9c052bd Update backend service ()
Add missing Backfill indexing on Create or Update Backfill on backend.
Release tickets when a backfill generation mismatch happens.
Refactored: new doRelease() function for tickets.

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-22 21:43:00 +03:00
ea744b8b51 Fix install-scale-chart target ()
* Implement backfill querying

* Update location for stable and incubator charts

* Add MMF backfill example

* Simplify MMF backfill example

* Render jaeger configuration if it is enabled

Helm fails to install the open-match chart with jaeger disabled because it cannot find the
openmatch.jaeger.agent template, which is declared in the jaeger subchart. Helm cannot
find that template because the jaeger subchart is not loaded when it is marked as disabled
in the open-match chart dependencies.

* Update install-scale-chart target

Currently the open-match-scale subchart is installed separately from the open-match chart, but they are tightly coupled.
Pods declared in the scale subchart depend on service accounts and config maps provisioned by the open-match
chart, so helm renders incorrect service account and config map names. This can be fixed by
specifying explicit names in the install-scale-chart target.

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-22 21:14:27 +03:00
1a8fc62833 add @sawagh to codeowners () 2020-12-21 20:51:50 +03:00
1d5574b8a3 MMF backfill example ()
* Implement backfill querying

* Update location for stable and incubator charts

* Add MMF backfill example

* Simplify MMF backfill example

Co-authored-by: Alexander Apalikov <alexander.apalikov@globant.com>
2020-12-18 18:28:32 +03:00
75a3d43477 Fix typo ()
And trigger e2e-cluster tests on master.
2020-12-18 12:30:34 +03:00
252fc8090d Backfill: Autoincrement generation on every Backfill update ()
* Backfill: Autoincrement generation on every Backfill update

In order for the Backfill Cache to work in QueryBackfill, every update should
store the backfill with a new Generation.

In the future, Generation could be renamed to a Version field in Backfill,
one change at a time.

* Update Generation on Backend and Frontend Updates

No updates on AcknowledgeBackfill.

* Fix tests after merging master

Add initial Generation as 1 everywhere - on CreateBackfill from Backend
and Frontend.

* Add missing license header
2020-12-17 18:12:24 +03:00
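A sketch of the generation scheme this commit describes, with illustrative Go types (the real Backfill message lives in the project's protobuf definitions):

```go
package statestore

// Backfill's Generation acts as an optimistic-concurrency version: every
// update is persisted with an incremented generation, so the query-side
// backfill cache can tell a stale copy from the current one.
type Backfill struct {
	ID         string
	Generation int64
}

// persistUpdate stores b as a new generation.
func persistUpdate(b *Backfill, save func(*Backfill) error) error {
	b.Generation++
	return save(b)
}
```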
2c617f2cb6 Update location for stable and incubator charts ()
* Implement backfill querying

* Update location for stable and incubator charts
2020-12-17 13:36:16 +03:00
fcd590eca6 Implement backfill querying () 2020-12-16 23:15:51 +03:00
4b3147511b create CODEOWNERS
list of those with review perms for easy PR review notifications
2020-12-14 15:29:58 -08:00
c85af44567 Frontend: UpdateBackfill and DeleteBackfill handlers () 2020-12-03 10:58:24 -08:00
688262111d Create, Update backfill after MMF run () 2020-12-02 17:51:27 -08:00
26d1aa236a Redis: Backfill last acknowledged () 2020-12-01 21:52:52 -08:00
fff37cd82c Update autogenerated protobuf files and Swagger for Frontend () 2020-11-30 10:26:34 -08:00
98a227b515 Update go.sum () 2020-11-30 09:59:35 -08:00
88cd95fe57 Backfill indexing () 2020-11-29 22:16:09 -08:00
248494c04c Frontend Create Backfill () 2020-11-25 22:54:56 -08:00
aa4398e786 Improve comments for RPC funcs () 2020-11-23 11:25:28 -08:00
fc5c3629e8 fixed the wrong spelling () 2020-11-23 09:43:30 -08:00
8d86709632 Makefile update to make api/api.md target commands universal across various environments () 2020-11-19 22:22:54 -08:00
0a273674b9 Update supported gke version for create-gke-cluster target () 2020-11-19 22:05:39 -08:00
e2247a7f53 Add Backfill support to internal statestore () 2020-11-19 14:20:55 -08:00
b269896c23 Undo change that I shouldn't have been able to do 2020-11-16 16:35:07 -08:00
a210185098 Testing change to build system, DO NOT SUBMIT 2020-11-16 16:33:49 -08:00
4df95deb54 Added test for unavailable gRPC function () 2020-11-12 21:29:40 -08:00
a9b8eec9e0 Updating the dependencies for the project ()
* Updated dependencies
* Updated tutorials dependencies
* Updated tests
2020-11-12 15:21:07 -05:00
afa59327a4 Add ability to filter backfills () 2020-11-08 21:57:57 -08:00
d86b6c5121 Add comments when displaying makefile usage () 2020-11-03 10:50:33 -08:00
2eb2921914 Adding Exclude property to DoubleRangeFilter and test coverage. () 2020-11-02 13:46:09 -08:00
80d882b7c7 Consider backfill's id when de-colliding matches () 2020-11-02 11:53:27 -08:00
0f34e31778 New fields in protobuf definitions () 2020-10-30 11:45:51 -07:00
d45eb74510 Revert "Unavailable gRPC match functions forces us to wait the proposalCollectionInterval before failing ()" ()
This reverts commit 1765ab7b7e8fcc24015f5c40938c661f82bdbc9a.
2020-10-29 11:58:32 -07:00
1765ab7b7e Unavailable gRPC match functions forces us to wait the proposalCollectionInterval before failing () 2020-10-28 11:30:30 -07:00
280 changed files with 20644 additions and 17131 deletions
.github
.golangci.yaml
Dockerfile.base-build
Dockerfile.ci
Dockerfile.cmd
Makefile
README.md
api
cloudbuild.yaml
docs
examples
go.mod
go.sum
install
internal
pkg
testing
third_party
    protoc-gen-openapiv2/options
    protoc-gen-swagger/options
    swaggerui
tools/reaper/internal
tutorials
    custom_evaluator
    default_evaluator
    matchmaker101
    matchmaker102
    redis-enterprise-cloud

.github/CODEOWNERS vendored Normal file (1 change)

@@ -0,0 +1 @@
* @laremere @aLekSer @calebatwd @syntxerror @sawagh @amg84 @scosgrave @mridulji @markmandel @joeholley @kazshinohara @kemurayama @govargo @ashutosji

@@ -27,4 +27,4 @@ If the matter is security related, please disclose it privately via
**Open Match Release Version**:
**Install Method(yaml/helm):**:
**Install Method(yaml/helm)**:

@@ -37,7 +37,7 @@ then you'll need to create the release branch.
```shell
# Create a local release branch.
git checkout -b release-0.5 upstream/master
git checkout -b release-0.5 upstream/main
# Push the branch upstream.
git push upstream release-0.5
```
@@ -49,7 +49,7 @@ otherwise there should already be a `release-0.5` branch so run,
git checkout -b release-0.5 upstream/release-0.5
```
**NOTE: The branch name must be in the format, `release-X.Y` otherwise**
**NOTE: The branch name must be in the format, `release-X.Y.Z` otherwise**
**some artifacts will not be pushed.**
## Releases & Versions
@@ -76,7 +76,7 @@ Hot Fixes:
Preview:
* Rare, a one off release cut from the master branch to provide early access
* Rare, a one off release cut from the main branch to provide early access
to APIs or some other major change.
* **NOTE: There's no branch for this release.**
* Example: 0.5-preview.1
@@ -97,8 +97,8 @@ releases. Find {version} and replace with the current release (e.g. 0.5.0)
only required once.**
- [ ] Create the branch in the **upstream** repository. It should be named
release-X.Y. Example: release-0.5. At this point there's effectively a code
freeze for this version and all work on master will be included in a future
release-X.Y.Z. Example: release-0.5. At this point there's effectively a code
freeze for this version and all work on main will be included in a future
version. If you're on the branch that you created in the *getting setup*
section above you should be able to push upstream.
@@ -106,18 +106,16 @@ only required once.**
git push origin release-0.5
```
- [ ] Announce a PR freeze on release-X.Y branch on [open-match-discuss@](mailing-list-post).
- [ ] Announce a PR freeze on release-X.Y.Z branch on [open-match-discuss@](https://groups.google.com/forum/#!forum/open-match-discuss).
- [ ] Open the [`Makefile`](makefile-version) and change BASE_VERSION entry.
- [ ] Open the [`install/helm/open-match/Chart.yaml`](om-chart-yaml-version) and change the `appVersion` and `version` entries.
- [ ] Open the [`install/helm/open-match/values.yaml`](om-values-yaml-version) and change the `tag` entries.
- [ ] Open the [`cloudbuild.yaml`] and change the `_OM_VERSION` entry.
- [ ] There might be additional references to the old version but be careful not to change it for places that have it for historical purposes.
- [ ] Run `make release`
- [ ] Run `make api/api.md` in open-match repo to update the auto-generated API references in open-match-docs repo.
- [ ] Use the files under the `build/release/` directory for the Open Match installation guide. Make sure the artifacts work as expected - these are the artifacts that will be published to the GCS bucket and used in our release assets.
- [ ] Update usage requirements in the Installation doc - e.g. supported minikube version, kubectl version, golang version, etc.
- [ ] Create a PR with the changes, include the release candidate name, and point it to the release branch.
- [ ] Go to [open-match-build](https://pantheon.corp.google.com/cloud-build/triggers?project=open-match-build) and update all *post submit* triggers' `_GCB_LATEST_VERSION` value to the `X.Y` of the release. This value should only increase as it's used to determine the latest stable version.
- [ ] Merge your changes once the PR is approved.
- [ ] Go to [open-match-build](https://pantheon.corp.google.com/cloud-build/triggers?project=open-match-build) and update all *post submit* triggers' `_GCB_LATEST_VERSION` value to the `X.Y.Z` of the release. This value should only increase as it's used to determine the latest stable version.
- [ ] Merge your changes once the PR is approved. Note: the helm chart is not published to the public registry until the merge is complete (it's a second cloud build trigger upon merge), so you won't be able to do final release testing until after all checks/approvals are finished!
## Create a release branch in the upstream open-match-docs repository
- [ ] Open [`Makefile`](makefile-version) and change BASE_VERSION entry.
@@ -132,50 +130,46 @@ git push origin release-0.5
only required once.**
- [ ] Create the next [version milestone](https://github.com/googleforgames/open-match/milestones) and use [semantic versioning](https://semver.org/) when naming it to be consistent with the [Go community](https://blog.golang.org/versioning-proposal).
- [ ] Create a *draft* [release](https://github.com/googleforgames/open-match/releases). Note that github has both "Pre-release" and "draft" as different concepts for a release. Until the release is finalized, only use "Save draft", and do not use "Publish release".
- [ ] Use the [release template](https://github.com/googleforgames/open-match/blob/master/docs/governance/templates/release.md)
- [ ] `Tag` = v{version}. Example: v0.5.0. Append -rc.# for release candidates. Example: v0.5.0-rc.1.
- [ ] `Target` = release-X.Y. Example: release-0.5.
- [ ] `Release Title` = `Tag`
- [ ] `Write` section will contain the contents from the [release template](https://github.com/googleforgames/open-match/blob/master/docs/governance/templates/release.md).
- [ ] Add the milestone to all PRs and issues that were merged since the last milestone. Look at the [releases page](https://github.com/googleforgames/open-match/releases) and look for the "X commits to master since this release" for the diff.
- [ ] Review all [milestone-less closed issues](https://github.com/googleforgames/open-match/issues?q=is%3Aissue+is%3Aclosed+no%3Amilestone) and assign the appropriate milestone.
- [ ] Review all [issues in milestone](https://github.com/googleforgames/open-match/milestones) for proper [labels](https://github.com/googleforgames/open-match/labels) (ex: area/build).
- [ ] Use the [release template](https://github.com/googleforgames/open-match/blob/main/docs/governance/templates/release.md)
- [ ] `Tag = v{version}` (Example: v0.5.0. Append -rc.# for release candidates. Example: v0.5.0-rc.1.)
- [ ] `Target = release-X.Y.Z` (Example: release-0.5.)
- [ ] `Release Title = v{version}` (Must match `Tag`)
- [ ] `Write` section will contain the contents from the [release template](https://github.com/googleforgames/open-match/blob/main/docs/governance/templates/release.md).
- [ ] Add the milestone to all PRs and issues that were merged since the last milestone. Look at the [releases page](https://github.com/googleforgames/open-match/releases) and look for the "X commits to main since this release" for the diff.
- [ ] Review all [milestone-less closed PRs](https://github.com/googleforgames/open-match/pulls?q=is%3Apr+is%3Aclosed+no%3Amilestone) and assign the appropriate milestone.
- [ ] Review all [PRs in milestone](https://github.com/googleforgames/open-match/milestones) for proper [labels](https://github.com/googleforgames/open-match/labels) (ex: area/build).
- [ ] View all open entries in milestone and move them to a future milestone if they aren't getting closed in time. https://github.com/googleforgames/open-match/milestones/v{version}
- [ ] Review all closed PRs against the milestone. Put the user visible changes into the release notes using the suggested format. https://github.com/googleforgames/open-match/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aclosed+is%3Amerged+milestone%3Av{version}
- [ ] Review all closed issues against the milestone. Put the user visible changes into the release notes using the suggested format. https://github.com/googleforgames/open-match/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aclosed+milestone%3Av{version}
- [ ] Verify the [milestone](https://github.com/googleforgames/open-match/milestones) is effectively 100% at this point with the exception of the release issue itself.
- [ ] Verify everything in the [milestone](https://github.com/googleforgames/open-match/milestones) is complete with the exception of the release issue itself.
## Build Artifacts
## Build And Test Artifacts
- [ ] Go to the History section and find the "Post Submit" build of the merged commit that's running. Wait for it to go Green. If it's red, fix error repeat this section. Take note of the docker image version tag for next step. Example: 0.5.0-a4706cb.
- [ ] Navigate to the [Cloud Console](https://console.cloud.google.com) in a browser and open the [Cloud Build History section](https://console.cloud.google.com/cloud-build/builds?project=open-match-build) and find the latest "Post Submit" build (trigger id: 9a451c7a-197b-4a38-a612-21f4c53c42fd) of the merged commit. The build may still be running, if so wait for it to finish. If it failed, fix the error and repeat this section. Open the build details and click on step 12, "Build: Docker Images". Take note of the docker image version tag near the top of the build log. This is the "{source version tag}" referenced in various commands below. Example: `0.5.0-a4706cb`.
- [ ] Run `./docs/governance/templates/release.sh {source version tag} {version}` to copy the images to open-match-public-images.
- [ ] If this is a new minor version in the newest major version then run `./docs/governance/templates/release.sh {source version tag} latest`.
- [ ] Copy the files from `build/release/` generated from `make release` to the release draft you created. You can drag and drop the files using the Github UI.
- [ ] Update [Slack invitation link](https://slack.com/help/articles/201330256-invite-new-members-to-your-workspace#share-an-invite-link) in [open-match.dev](https://open-match.dev/site/docs/contribute/#get-involved).
- [ ] Test Open Match installation under GKE and Minikube environment using YAML files and Helm. Follow the [First Match](https://development.open-match.dev/site/docs/getting-started/first_match/) guide, run `make proxy-demo`, and open `localhost:51507` to make sure everything works.
- [ ] If this is not a release candidate or preview but a full release, run `./docs/governance/templates/release.sh {source version tag} latest` to tag these public images as the default version to pull from the registry.
- [ ] Once the images have successfully been pushed to the registry, modify the line `open-match.dev/open-match v0.0.0-dev` in all `go.mod` files in the [Tutorials](https://github.com/googleforgames/open-match/tree/main/tutorials) directory to use the current release version for the remainder of your local release testing. This includes all solution subdirectories as well. This change is local only and doesn't get committed to git.
- [ ] Copy the installation files named `{sequence_number}-{component}.yaml` (example: `01-open-match-core.yaml`) from the [build folder in the private open-match-build-artifacts GCS bucket https://storage.mtls.cloud.google.com/open-match-build-artifacts/{version}](https://console.cloud.google.com/storage/browser/open-match-build-artifacts?project=open-match-build) to the release draft you created. Download them to your local machine, and then attach them to the draft using the Github UI. Note: the `05-jaeger.yaml` file no longer exists after release 1.8, so don't be surprised if that number is missing.
- [ ] Update the [Slack invitation link](https://slack.com/help/articles/201330256-invite-new-members-to-your-workspace#share-an-invite-link) in [open-match.dev](https://open-match.dev/site/docs/contribute/#get-involved).
- [ ] Test Open Match installation under GKE and Minikube environment using the YAML files attached to the release and the latest Helm chart, pulled from the public helm repo (not your local copy from github). Follow the [First Match](https://development.open-match.dev/site/docs/getting-started/first_match/) guide, run `make proxy-demo`, and open `localhost:51507` to make sure everything works.
- [ ] Minikube: Run `make create-mini-cluster` to create a local cluster with latest Kubernetes API version.
- [ ] GKE: Run `make create-gke-cluster` to create a GKE cluster.
- [ ] Helm: Run `helm install open-match -n open-match open-match/open-match`
- [ ] Update usage requirements in the Installation doc - e.g. supported minikube version, kubectl version, golang version, etc.
- [ ] Helm: Run `helm install open-match -n open-match open-match/open-match`. Note, the helm chart for the release is not public until the PR has been merged, so you cannot complete this step until after the PR is closed and the 'Tagged Build' trigger (trigger ID: 083adc1a-fcac-4033-bc38-b9f6eadcb75d) has completed, which publishes the helm chart.
## Finalize
- [ ] Save the release as a draft.
- [ ] Make sure your release draft reflects all steps up to this point, and is saved (so contributors can review it).
- [ ] Circulate the draft release to active contributors. Where reasonable, get everyone's ok on the release notes before continuing.
- [ ] Publish the [Release](om-release) in Github. This will notify repository watchers.
- [ ] Publish the [Release](om-release) on Open Match [Blog](https://open-match.dev/site/blog/).
## Announce
- [ ] Send an email to the [mailing list](mailing-list-post) with the release details (copy-paste the release blog post)
- [ ] Send a chat on the [Slack channel](om-slack). "Open Match {version} has been released! Check it out at {release url}."
- [ ] Send an email to the [mailing list](https://groups.google.com/forum/#!newtopic/open-match-discuss) with the release details (copy-paste the release blog post)
- [ ] Send a chat on the [Slack channel](https://open-match.slack.com/). "Open Match {version} has been released! Check it out at {release url}."
[om-slack]: https://open-match.slack.com/
[mailing-list-post]: https://groups.google.com/forum/#!newtopic/open-match-discuss
[release-template]: https://github.com/googleforgames/open-match/blob/master/docs/governance/templates/release.md
[makefile-version]: https://github.com/googleforgames/open-match/blob/master/Makefile#L53
[om-chart-yaml-version]: https://github.com/googleforgames/open-match/blob/master/install/helm/open-match/Chart.yaml#L16
[om-values-yaml-version]: https://github.com/googleforgames/open-match/blob/master/install/helm/open-match/values.yaml#L16
[makefile-version]: https://github.com/googleforgames/open-match/blob/main/Makefile#L53
[om-chart-yaml-version]: https://github.com/googleforgames/open-match/blob/main/install/helm/open-match/Chart.yaml#L16
[om-values-yaml-version]: https://github.com/googleforgames/open-match/blob/main/install/helm/open-match/values.yaml#L16
[om-release]: https://github.com/googleforgames/open-match/releases/new
[readme-deploy]: https://github.com/googleforgames/open-match/blob/master/README.md#deploy-to-kubernetes
[readme-deploy]: https://github.com/googleforgames/open-match/blob/main/README.md#deploy-to-kubernetes

@@ -175,6 +175,7 @@ linters:
- gosec
- interfacer # deprecated - "A tool that suggests interfaces is prone to bad suggestions"
- lll
- typecheck
#linters:
# enable-all: true

@@ -13,8 +13,7 @@
# limitations under the License.
# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
FROM golang:1.14.0
ENV GO111MODULE=on
FROM golang:1.21.5
WORKDIR /go/src/open-match.dev/open-match

@@ -11,47 +11,65 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM debian:bookworm
FROM debian
RUN apt-get update
RUN apt-get install -y -qq git make python3 virtualenv curl sudo unzip apt-transport-https ca-certificates curl software-properties-common gnupg2
# Docker
RUN curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
RUN sudo apt-key fingerprint 0EBFCD88
RUN sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/debian \
stretch \
stable"
RUN sudo apt-get update
RUN sudo apt-get install -y -qq docker-ce docker-ce-cli containerd.io
# Cloud SDK
RUN export CLOUD_SDK_REPO="cloud-sdk-stretch" && \
echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
apt-get update -y && apt-get install google-cloud-sdk google-cloud-sdk-app-engine-go -y -qq
# Install Golang
# https://github.com/docker-library/golang/blob/master/1.14/stretch/Dockerfile
RUN mkdir -p /toolchain/golang
WORKDIR /toolchain/golang
RUN sudo rm -rf /usr/local/go/
# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
RUN curl -L https://golang.org/dl/go1.14.linux-amd64.tar.gz | sudo tar -C /usr/local -xz
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
RUN sudo mkdir -p "$GOPATH/src" "$GOPATH/bin" \
&& sudo chmod -R 777 "$GOPATH"
# Prepare toolchain and workspace
RUN mkdir -p /toolchain
WORKDIR /workspace
# set env vars
ARG DEBIAN_FRONTEND=noninteractive
ENV OPEN_MATCH_CI_MODE=1
ENV KUBECONFIG=$HOME/.kube/config
RUN mkdir -p $HOME/.kube/
ENV GOPATH=/go
ENV PATH=$GOPATH/bin:/usr/local/go/bin:$PATH
RUN apt-get update -y \
&& apt-get install -y -qq --no-install-recommends \
apt-utils \
git \
make \
python3 \
virtualenv \
curl \
sudo \
unzip \
apt-transport-https \
ca-certificates \
curl \
software-properties-common \
gnupg2 \
libc6 \
build-essential
RUN mkdir -p /usr/share/keyrings/
# Docker
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker.gpg] https://download.docker.com/linux/debian bookworm stable" | tee -a /etc/apt/sources.list.d/docker.list \
&& curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker.gpg \
&& apt-get update -y \
&& apt-get install -y -qq \
docker-ce \
docker-ce-cli \
containerd.io
# Cloud SDK
RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list \
&& curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - \
&& apt-get update -y \
&& apt-get install -y -qq \
google-cloud-cli \
google-cloud-sdk-gke-gcloud-auth-plugin
# Install Golang
RUN mkdir -p /toolchain/golang
WORKDIR /toolchain/golang
RUN rm -rf /usr/local/go/
# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
# reference: https://github.com/docker-library/golang/blob/master/1.20/bookworm/Dockerfile
RUN curl -L https://golang.org/dl/go1.21.5.linux-amd64.tar.gz | tar -C /usr/local -xz
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" \
&& chmod -R 777 "$GOPATH"
# Prepare toolchain, workspace, homedir
RUN mkdir -p \
/toolchain \
/workspace \
$HOME/.kube/
WORKDIR /workspace

@@ -18,7 +18,9 @@ WORKDIR /go/src/open-match.dev/open-match
ARG IMAGE_TITLE
RUN make "build/cmd/${IMAGE_TITLE}"
RUN --mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
make "build/cmd/${IMAGE_TITLE}"
FROM gcr.io/distroless/static:nonroot
ARG IMAGE_TITLE

Makefile (287 changes)

@@ -15,44 +15,45 @@
## Open Match Make Help
## ====================
##
## Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
## # Create a GKE Cluster (requires gcloud installed and initialized, https://cloud.google.com/sdk/docs/quickstarts)
## make activate-gcp-apis
## make create-gke-cluster push-helm
##
## Create a Minikube Cluster (requires VirtualBox)
## # Create a Minikube Cluster (requires VirtualBox)
## make create-mini-cluster push-helm
##
## Create a KinD Cluster (Follow instructions to run command before pushing helm.)
## # Create a KinD Cluster (Follow instructions to run command before pushing helm.)
## make create-kind-cluster get-kind-kubeconfig
## Finish KinD setup by installing helm:
##
## # Finish KinD setup by installing helm:
## make push-helm
##
## Deploy Open Match
## # Deploy Open Match
## make push-images -j$(nproc)
## make install-chart
##
## Build and Test
## # Build and Test
## make all -j$(nproc)
## make test
##
## Access telemetry
## # Access telemetry
## make proxy-prometheus
## make proxy-grafana
## make proxy-ui
##
## Teardown
## # Teardown
## make delete-mini-cluster
## make delete-gke-cluster
## make delete-kind-cluster && export KUBECONFIG=""
##
## Prepare a Pull Request
## # Prepare a Pull Request
## make presubmit
##
# If you want information on how to edit this file checkout,
# http://makefiletutorial.com/
BASE_VERSION = 0.0.0-dev
BASE_VERSION = 1.8.1
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
@@ -60,15 +61,15 @@ BUILD_DATE = $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
YEAR_MONTH = $(shell date -u +'%Y%m')
YEAR_MONTH_DAY = $(shell date -u +'%Y%m%d')
MAJOR_MINOR_VERSION = $(shell echo $(BASE_VERSION) | cut -d '.' -f1).$(shell echo $(BASE_VERSION) | cut -d '.' -f2)
PROTOC_VERSION = 3.10.1
HELM_VERSION = 3.0.0
KUBECTL_VERSION = 1.16.2
PROTOC_VERSION = 24.0
HELM_VERSION = 3.12.3
KUBECTL_VERSION = 1.27.3
MINIKUBE_VERSION = latest
GOLANGCI_VERSION = 1.18.0
KIND_VERSION = 0.5.1
SWAGGERUI_VERSION = 3.24.2
GOOGLE_APIS_VERSION = aba342359b6743353195ca53f944fe71e6fb6cd4
GRPC_GATEWAY_VERSION = 1.14.3
GRPC_GATEWAY_VERSION = 2.16.2
TERRAFORM_VERSION = 0.12.13
CHART_TESTING_VERSION = 2.4.0
@@ -76,7 +77,7 @@ CHART_TESTING_VERSION = 2.4.0
REDIS_DEV_PASSWORD = helloworld
ENABLE_SECURITY_HARDENING = 0
GO = GO111MODULE=on go
GO = go
# Defines the absolute local directory of the open-match project
REPOSITORY_ROOT := $(patsubst %/,%,$(dir $(abspath $(MAKEFILE_LIST))))
BUILD_DIR = $(REPOSITORY_ROOT)/build
@@ -99,11 +100,10 @@ GCP_ZONE = us-west1-a
GCP_LOCATION = $(GCP_ZONE)
EXE_EXTENSION =
GCP_LOCATION_FLAG = --zone $(GCP_ZONE)
GO111MODULE = on
GOLANG_TEST_COUNT = 1
GOLANG_EXTRA_TEST_FLAGS =
SWAGGERUI_PORT = 51500
PROMETHEUS_PORT = 9090
JAEGER_QUERY_PORT = 16686
GRAFANA_PORT = 3000
FRONTEND_PORT = 51504
BACKEND_PORT = 51505
@@ -120,6 +120,7 @@ CERTGEN = $(TOOLCHAIN_BIN)/certgen$(EXE_EXTENSION)
GOLANGCI = $(TOOLCHAIN_BIN)/golangci-lint$(EXE_EXTENSION)
CHART_TESTING = $(TOOLCHAIN_BIN)/ct$(EXE_EXTENSION)
GCLOUD = gcloud --quiet
USE_GKE_GCLOUD_AUTH_PLUGIN = True
OPEN_MATCH_HELM_NAME = open-match
OPEN_MATCH_KUBERNETES_NAMESPACE = open-match
OPEN_MATCH_SECRETS_DIR = $(REPOSITORY_ROOT)/install/helm/open-match/secrets
@@ -187,9 +188,11 @@ else
endif
endif
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
GOLANG_PROTOS = pkg/pb/backend.pb.go pkg/pb/frontend.pb.go pkg/pb/matchfunction.pb.go pkg/pb/query.pb.go pkg/pb/messages.pb.go pkg/pb/extensions.pb.go pkg/pb/evaluator.pb.go internal/ipb/synchronizer.pb.go internal/ipb/messages.pb.go pkg/pb/backend.pb.gw.go pkg/pb/frontend.pb.gw.go pkg/pb/matchfunction.pb.gw.go pkg/pb/query.pb.gw.go pkg/pb/evaluator.pb.gw.go
golang-protos: $(GOLANG_PROTOS)
SWAGGER_JSON_DOCS = api/frontend.swagger.json api/backend.swagger.json api/query.swagger.json api/matchfunction.swagger.json api/evaluator.swagger.json
swagger-json-docs: $(SWAGGER_JSON_DOCS)
ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
@@ -197,7 +200,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
CMDS = $(notdir $(wildcard cmd/*))
# Names of the individual images, omitting the openmatch prefix.
IMAGES = $(CMDS) mmf-go-soloduel base-build
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-backfill base-build
help:
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
@@ -209,17 +212,18 @@ local-cloud-build: gcloud
################################################################################
## #############################################################################
## Image commands:
## These commands are auto-generated based on a complete list of images. All
## folders in cmd/ are turned into an image using Dockerfile.cmd. Additional
## images are specified by the IMAGES variable. Image commands omit the
## "openmatch-" prefix on the image name and tags.
## These commands are auto-generated based on a complete list of images.
## All folders in cmd/ are turned into an image using Dockerfile.cmd.
## Additional images are specified by the IMAGES variable.
## Image commands omit the "openmatch-" prefix on the image name and tags.
##
list-images:
@echo $(IMAGES)
#######################################
## build-images / build-<image name>-image: builds images locally
## # Builds images locally
## build-images / build-<image name>-image
##
build-images: $(foreach IMAGE,$(IMAGES),build-$(IMAGE)-image)
@@ -240,9 +244,12 @@ $(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-buil
build-mmf-go-soloduel-image: docker build-base-build-image
docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
build-mmf-go-backfill-image: docker build-base-build-image
docker build -f examples/functions/golang/backfill/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-backfill:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-backfill:$(ALTERNATE_TAG) .
#######################################
## push-images / push-<image name>-image: builds and pushes images to your
## container registry.
## # Builds and pushes images to your container registry.
## push-images / push-<image name>-image
##
push-images: $(foreach IMAGE,$(IMAGES),push-$(IMAGE)-image)
@@ -261,8 +268,9 @@ endif
endif
#######################################
## retag-images / retag-<image name>-image: publishes images on the public
## container registry. Used for publishing releases.
## # Publishes images on the public container registry.
## # Used for publishing releases.
## retag-images / retag-<image name>-image
##
retag-images: $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image)
@@ -275,7 +283,8 @@ $(foreach IMAGE,$(IMAGES),retag-$(IMAGE)-image): retag-%-image: docker
docker push $(TARGET_REGISTRY)/openmatch-$*:$(TAG)
#######################################
## clean-images / clean-<image name>-image: removes images from local docker
## # Removes images from local docker
## clean-images / clean-<image name>-image
##
clean-images: docker $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image)
-docker rmi -f open-match-base-build
@@ -285,7 +294,7 @@ $(foreach IMAGE,$(IMAGES),clean-$(IMAGE)-image): clean-%-image:
#####################################################################################################################
update-chart-deps: build/toolchain/bin/helm$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com; $(HELM) dependency update)
(cd $(REPOSITORY_ROOT)/install/helm/open-match; $(HELM) repo add incubator https://charts.helm.sh/incubator; $(HELM) repo add bitnami https://charts.bitnami.com/bitnami;$(HELM) dependency update)
lint-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/ct$(EXE_EXTENSION)
(cd $(REPOSITORY_ROOT)/install/helm; $(HELM) lint $(OPEN_MATCH_HELM_NAME))
@@ -298,8 +307,8 @@ build/chart/open-match-$(BASE_VERSION).tgz: build/toolchain/bin/helm$(EXE_EXTENS
build/chart/index.yaml: build/toolchain/bin/helm$(EXE_EXTENSION) gcloud build/chart/open-match-$(BASE_VERSION).tgz
mkdir -p $(BUILD_DIR)/chart-index/
-gsutil cp gs://open-match-chart/chart/index.yaml $(BUILD_DIR)/chart-index/
-gsutil -m cp gs://open-match-chart/chart/open-match-* $(BUILD_DIR)/chart-index/
-gsutil cp $(_CHARTS_BUCKET)/chart/index.yaml $(BUILD_DIR)/chart-index/
-gsutil -m cp $(_CHARTS_BUCKET)/chart/open-match-* $(BUILD_DIR)/chart-index/
$(HELM) repo index $(BUILD_DIR)/chart-index/
$(HELM) repo index --merge $(BUILD_DIR)/chart-index/index.yaml $(BUILD_DIR)/chart/
@@ -310,10 +319,9 @@ build/chart/: build/chart/index.yaml build/chart/index.yaml.$(YEAR_MONTH_DAY)
install-chart-prerequisite: build/toolchain/bin/kubectl$(EXE_EXTENSION) update-chart-deps
-$(KUBECTL) create namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)
$(KUBECTL) apply -f install/gke-metadata-server-workaround.yaml
# Used for Open Match development. Install om-configmap-override.yaml by default.
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD)
HELM_UPGRADE_FLAGS = --cleanup-on-fail -i --no-hooks --debug --timeout=600s --namespace=$(OPEN_MATCH_KUBERNETES_NAMESPACE) --set global.gcpProjectId=$(GCP_PROJECT_ID) --set open-match-override.enabled=true --set redis.password=$(REDIS_DEV_PASSWORD) --set redis.auth.enabled=false --set redis.auth.sentinel=false
# Used for generate static yamls. Install om-configmap-override.yaml as needed.
HELM_TEMPLATE_FLAGS = --no-hooks --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --set usingHelmTemplate=true
HELM_IMAGE_FLAGS = --set global.image.registry=$(REGISTRY) --set global.image.tag=$(TAG)
@@ -332,7 +340,6 @@ install-large-chart: install-chart-prerequisite install-demo build/toolchain/bin
--set open-match-customize.enabled=true \
--set open-match-customize.evaluator.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set global.telemetry.jaeger.enabled=true \
--set global.telemetry.prometheus.enabled=true
# install-chart will install open-match-core, open-match-demo, with the demo evaluator and mmf.
@@ -350,14 +357,16 @@ install-scale-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EX
--set open-match-customize.evaluator.enabled=true \
--set open-match-customize.function.image=openmatch-scale-mmf \
--set global.telemetry.grafana.enabled=true \
--set global.telemetry.jaeger.enabled=false \
--set global.telemetry.prometheus.enabled=true
$(HELM) template $(OPEN_MATCH_HELM_NAME)-scale install/helm/open-match $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) -f install/helm/open-match/values-production.yaml \
--set open-match-core.enabled=false \
--set open-match-core.redis.enabled=false \
--set global.telemetry.prometheus.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set open-match-scale.enabled=true | $(KUBECTL) apply -f -
--set global.kubernetes.serviceAccount=$(OPEN_MATCH_HELM_NAME)-unprivileged-service \
--set open-match-scale.enabled=true \
--set open-match-scale.configs.default.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-default\" \}\}" \
--set open-match-scale.configs.override.configName="\{\{ printf \"$(OPEN_MATCH_HELM_NAME)-configmap-override\" \}\}" | $(KUBECTL) apply -f -
# install-ci-chart will install open-match-core with pool based mmf for end-to-end in-cluster test.
install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTENSION) install/helm/open-match/secrets/
@@ -369,7 +378,7 @@ install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTEN
--set open-match-core.registrationInterval=200ms \
--set open-match-core.proposalCollectionInterval=200ms \
--set open-match-core.assignedDeleteTimeout=200ms \
--set open-match-core.pendingReleaseTimeout=200ms \
--set open-match-core.pendingReleaseTimeout=1s \
--set open-match-core.queryPageSize=10 \
--set global.gcpProjectId=intentionally-invalid-value \
--set redis.master.resources.requests.cpu=0.6,redis.master.resources.requests.memory=300Mi \
@@ -378,8 +387,8 @@ install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTEN
delete-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
-$(HELM) uninstall $(OPEN_MATCH_HELM_NAME)
-$(HELM) uninstall $(OPEN_MATCH_HELM_NAME)-demo
-$(KUBECTL) delete psp,clusterrole,clusterrolebinding --selector=release=open-match
-$(KUBECTL) delete psp,clusterrole,clusterrolebinding --selector=release=open-match-demo
-$(KUBECTL) delete clusterrole,clusterrolebinding --selector=release=open-match
-$(KUBECTL) delete clusterrole,clusterrolebinding --selector=release=open-match-demo
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)-demo
@@ -387,14 +396,11 @@ ifneq ($(BASE_VERSION), 0.0.0-dev)
install/yaml/: REGISTRY = gcr.io/$(OPEN_MATCH_PUBLIC_IMAGES_PROJECT_ID)
install/yaml/: TAG = $(BASE_VERSION)
endif
install/yaml/: update-chart-deps install/yaml/install.yaml install/yaml/01-open-match-core.yaml install/yaml/02-open-match-demo.yaml install/yaml/03-prometheus-chart.yaml install/yaml/04-grafana-chart.yaml install/yaml/05-jaeger-chart.yaml install/yaml/06-open-match-override-configmap.yaml install/yaml/07-open-match-default-evaluator.yaml
install/yaml/: update-chart-deps install/yaml/install.yaml install/yaml/01-open-match-core.yaml install/yaml/02-open-match-demo.yaml install/yaml/03-prometheus-chart.yaml install/yaml/04-grafana-chart.yaml install/yaml/06-open-match-override-configmap.yaml install/yaml/07-open-match-default-evaluator.yaml
# We have to hard-code the Jaeger endpoints as we are excluding Jaeger, so Helm cannot determine the endpoints from the Jaeger subchart
install/yaml/01-open-match-core.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
--set-string global.telemetry.jaeger.agentEndpoint="$(OPEN_MATCH_HELM_NAME)-jaeger-agent:6831" \
--set-string global.telemetry.jaeger.collectorEndpoint="http://$(OPEN_MATCH_HELM_NAME)-jaeger-collector:14268/api/traces" \
install/helm/open-match > install/yaml/01-open-match-core.yaml
install/yaml/02-open-match-demo.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@@ -423,15 +429,6 @@ install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set-string global.telemetry.grafana.prometheusServer="http://$(OPEN_MATCH_HELM_NAME)-prometheus-server.$(OPEN_MATCH_KUBERNETES_NAMESPACE).svc.cluster.local:80/" \
install/helm/open-match > install/yaml/04-grafana-chart.yaml
install/yaml/05-jaeger-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
--set open-match-core.enabled=false \
--set open-match-core.redis.enabled=false \
--set open-match-telemetry.enabled=true \
--set global.telemetry.jaeger.enabled=true \
install/helm/open-match > install/yaml/05-jaeger-chart.yaml
install/yaml/06-open-match-override-configmap.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
mkdir -p install/yaml/
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
@@ -456,7 +453,6 @@ install/yaml/install.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
--set open-match-customize.enabled=true \
--set open-match-customize.evaluator.enabled=true \
--set open-match-telemetry.enabled=true \
--set global.telemetry.jaeger.enabled=true \
--set global.telemetry.grafana.enabled=true \
--set global.telemetry.prometheus.enabled=true \
install/helm/open-match > install/yaml/install.yaml
@@ -468,10 +464,28 @@ set-redis-password:
stty echo; \
printf "\n"; \
$(KUBECTL) create secret generic open-match-redis -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) --from-literal=redis-password=$$REDIS_PASSWORD --dry-run -o yaml | $(KUBECTL) replace -f - --force
## ####################################
## # Tool installation helpers
##
## # Install toolchain. Short for installing K8s, protoc and OpenMatch tools.
## make install-toolchain
##
install-toolchain: install-kubernetes-tools install-protoc-tools install-openmatch-tools
## # Install Kubernetes tools
## make install-kubernetes-tools
##
install-kubernetes-tools: build/toolchain/bin/kubectl$(EXE_EXTENSION) build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/minikube$(EXE_EXTENSION) build/toolchain/bin/terraform$(EXE_EXTENSION)
install-protoc-tools: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION)
## # Install protoc tools
## make install-protoc-tools
##
install-protoc-tools: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go-grpc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION)
## # Install OpenMatch tools
## make install-openmatch-tools
##
install-openmatch-tools: build/toolchain/bin/certgen$(EXE_EXTENSION) build/toolchain/bin/reaper$(EXE_EXTENSION)
build/toolchain/bin/helm$(EXE_EXTENSION):
@ -536,18 +550,22 @@ build/toolchain/bin/protoc$(EXE_EXTENSION):
build/toolchain/bin/protoc-gen-doc$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc
cd $(TOOLCHAIN_BIN) && $(GO) get github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc && $(GO) build -pkgdir . github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc
build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/golang/protobuf/protoc-gen-go
cd $(TOOLCHAIN_BIN) && $(GO) get google.golang.org/protobuf/cmd/protoc-gen-go && $(GO) build -pkgdir . google.golang.org/protobuf/cmd/protoc-gen-go
build/toolchain/bin/protoc-gen-go-grpc$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) get google.golang.org/grpc/cmd/protoc-gen-go-grpc && $(GO) build -pkgdir . google.golang.org/grpc/cmd/protoc-gen-go-grpc
build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION):
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
cd $(TOOLCHAIN_BIN) && $(GO) get github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway && $(GO) build -pkgdir . github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway
build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION):
build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
cd $(TOOLCHAIN_BIN) && $(GO) get github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 && $(GO) build -pkgdir . github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2
build/toolchain/bin/certgen$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
@ -608,76 +626,81 @@ delete-kind-cluster: build/toolchain/bin/kind$(EXE_EXTENSION) build/toolchain/bi
create-cluster-role-binding:
$(KUBECTL) create clusterrolebinding myname-cluster-admin-binding --clusterrole=cluster-admin --user=$(GCLOUD_ACCOUNT_EMAIL)
create-gke-cluster: GKE_VERSION = 1.14.10-gke.45 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-4 --enable-autoscaling --min-nodes 1 --num-nodes 2 --max-nodes 10 --disk-size 50
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-8 --enable-autoscaling --min-nodes 1 --num-nodes 6 --max-nodes 10 --disk-size 50
create-gke-cluster: GKE_FUTURE_COMPAT_FLAGS = --no-enable-basic-auth --no-issue-client-certificate --enable-ip-alias --metadata disable-legacy-endpoints=true --enable-autoupgrade
create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
$(GCLOUD) beta $(GCP_PROJECT_FLAG) container clusters create $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GKE_CLUSTER_SHAPE_FLAGS) $(GKE_FUTURE_COMPAT_FLAGS) $(GKE_CLUSTER_FLAGS) \
--enable-pod-security-policy \
--cluster-version $(GKE_VERSION) \
$(GCLOUD) $(GCP_PROJECT_FLAG) container clusters create $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GKE_CLUSTER_SHAPE_FLAGS) $(GKE_FUTURE_COMPAT_FLAGS) $(GKE_CLUSTER_FLAGS) \
--cluster-version 1.27.3-gke.1700 \
--image-type cos_containerd \
--tags open-match
$(MAKE) create-cluster-role-binding
--tags open-match \
--workload-pool $(GCP_PROJECT_ID).svc.id.goog
delete-gke-cluster: gcloud
-$(GCLOUD) $(GCP_PROJECT_FLAG) container clusters delete $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) $(GCLOUD_EXTRA_FLAGS)
create-mini-cluster: build/toolchain/bin/minikube$(EXE_EXTENSION)
$(MINIKUBE) start --memory 6144 --cpus 4 --disk-size 50g
$(MINIKUBE) start -p openmatch --memory 6144 --cpus 4 --disk-size 50g --kubernetes-version=v1.27.3
delete-mini-cluster: build/toolchain/bin/minikube$(EXE_EXTENSION)
-$(MINIKUBE) delete
-$(MINIKUBE) delete -p openmatch
gcp-apply-binauthz-policy: build/policies/binauthz.yaml
$(GCLOUD) beta $(GCP_PROJECT_FLAG) container binauthz policy import build/policies/binauthz.yaml
## ####################################
## # Protobuf
##
## # Build all protobuf definitions.
## make all-protos
##
all-protos: $(ALL_PROTOS)
# The proto generator really wants to be run from the $GOPATH root, and doesn't
# support methods for directing it to a location other than the proto
# file's location. So instead put it in a temporary directory, then move it
# out.
pkg/pb/%.pb.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
# file's location.
# So, instead, put it in a temporary directory, then move it out.
pkg/pb/%.pb.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go-grpc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
mkdir -p $(REPOSITORY_ROOT)/build/prototmp $(REPOSITORY_ROOT)/pkg/pb
$(PROTOC) $< \
$(PROTOC) $< \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--go_out=plugins=grpc:$(REPOSITORY_ROOT)/build/prototmp
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/$@ $@
--go_out=$(REPOSITORY_ROOT)/build/prototmp \
--go-grpc_out=require_unimplemented_servers=false:$(REPOSITORY_ROOT)/build/prototmp
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/pkg/pb/* $(REPOSITORY_ROOT)/pkg/pb/
internal/ipb/%.pb.go: internal/api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
internal/ipb/%.pb.go: internal/api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go-grpc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
mkdir -p $(REPOSITORY_ROOT)/build/prototmp $(REPOSITORY_ROOT)/internal/ipb
$(PROTOC) $< \
$(PROTOC) $< \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--go_out=plugins=grpc:$(REPOSITORY_ROOT)/build/prototmp
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/$@ $@
--go_out=$(REPOSITORY_ROOT)/build/prototmp \
--go-grpc_out=require_unimplemented_servers=false:$(REPOSITORY_ROOT)/build/prototmp
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/internal/ipb/* $(REPOSITORY_ROOT)/internal/ipb/
pkg/pb/%.pb.gw.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
pkg/pb/%.pb.gw.go: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go-grpc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-grpc-gateway$(EXE_EXTENSION)
mkdir -p $(REPOSITORY_ROOT)/build/prototmp $(REPOSITORY_ROOT)/pkg/pb
$(PROTOC) $< \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--grpc-gateway_out=logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)/build/prototmp
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/$@ $@
mv $(REPOSITORY_ROOT)/build/prototmp/open-match.dev/open-match/pkg/pb/* $(REPOSITORY_ROOT)/pkg/pb/
api/%.swagger.json: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION)
api/%.swagger.json: api/%.proto third_party/ build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-openapiv2$(EXE_EXTENSION)
$(PROTOC) $< \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--swagger_out=logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)
--openapiv2_out=json_names_for_fields=false,logtostderr=true,allow_delete_body=true:$(REPOSITORY_ROOT)
## # Build API reference in markdown. Needs open-match-docs repo at the same level as this one.
## make api/api.md
##
api/api.md: third_party/ build/toolchain/bin/protoc-gen-doc$(EXE_EXTENSION)
$(PROTOC) api/*.proto \
-I $(REPOSITORY_ROOT) -I $(PROTOC_INCLUDES) \
--doc_out=. \
--doc_opt=markdown,api.md
--doc_opt=markdown,api_temp.md
# Crazy hack that inserts a hugo link reference into this API doc -)
$(SED_REPLACE) '1 i\---\
title: "Open Match API References" \
linkTitle: "Open Match API References" \
weight: 2 \
description: \
This document provides API references for Open Match services. \
--- \
' ./api.md && mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
cat ./docs/hugo_apiheader.txt ./api_temp.md >> api.md
mv ./api.md $(REPOSITORY_ROOT)/../open-match-docs/site/content/en/docs/Reference/
rm ./api_temp.md
# The include structure of the protos needs to be called out so the dependency chain is run through properly.
pkg/pb/backend.pb.go: pkg/pb/messages.pb.go
@ -686,7 +709,15 @@ pkg/pb/matchfunction.pb.go: pkg/pb/messages.pb.go
pkg/pb/query.pb.go: pkg/pb/messages.pb.go
pkg/pb/evaluator.pb.go: pkg/pb/messages.pb.go
internal/ipb/synchronizer.pb.go: pkg/pb/messages.pb.go
internal/ipb/messages.pb.go: pkg/pb/messages.pb.go
## ####################################
## # Go tasks
##
## # Build assets and binaries
## make build
##
build: assets
$(GO) build ./...
$(GO) build -tags e2ecluster ./...
@ -694,8 +725,10 @@ build: assets
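# test_folder recurses into every directory that contains a go.mod, tidies and
# downloads that module's dependencies, then runs its tests twice: once with
# the race detector enabled, and once without -race for tests whose names end
# in IgnoreRace.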
define test_folder
$(if $(wildcard $(1)/go.mod), \
cd $(1) && \
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -race ./... && \
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -run IgnoreRace$$ ./... \
$(GO) mod tidy && \
$(GO) mod download -x && \
CGO_ENABLED=1 $(GO) test $(GOLANG_EXTRA_TEST_FLAGS) -p 1 -cover -test.count $(GOLANG_TEST_COUNT) -race -vet=off ./... && \
CGO_ENABLED=0 $(GO) test $(GOLANG_EXTRA_TEST_FLAGS) -p 1 -cover -test.count $(GOLANG_TEST_COUNT) -vet=off -run IgnoreRace$$ ./... \
)
$(foreach dir, $(wildcard $(1)/*/.), $(call test_folder, $(dir)))
endef
@ -708,14 +741,20 @@ define fast_test_folder
$(foreach dir, $(wildcard $(1)/*/.), $(call fast_test_folder, $(dir)))
endef
## # Run go tests
## make test
##
test: $(ALL_PROTOS) tls-certs third_party/
$(call test_folder,.)
## # Run go tests more quickly, but with worse flake and race detection
## make fasttest
##
fasttest: $(ALL_PROTOS) tls-certs third_party/
$(call fast_test_folder,.)
test-e2e-cluster: all-protos tls-certs third_party/
$(HELM) test --timeout 7m30s -v 0 --logs -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(OPEN_MATCH_HELM_NAME)
$(HELM) test --timeout 15m --debug -v 0 --logs -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(OPEN_MATCH_HELM_NAME)
fmt:
$(GO) fmt ./...
@ -725,8 +764,11 @@ vet:
$(GO) vet ./...
golangci: build/toolchain/bin/golangci-lint$(EXE_EXTENSION)
GO111MODULE=on $(GOLANGCI) run --config=$(REPOSITORY_ROOT)/.golangci.yaml
$(GOLANGCI) run --config=$(REPOSITORY_ROOT)/.golangci.yaml
## # Run linter on Go code, charts and terraform
## make lint
##
lint: fmt vet golangci lint-chart terraform-lint
assets: $(ALL_PROTOS) tls-certs third_party/ build/chart/
@ -741,7 +783,7 @@ $(foreach CMD,$(CMDS),build/cmd/$(CMD)): build/cmd/%: build/cmd/%/BUILD_PHONY bu
build/cmd/%/BUILD_PHONY:
mkdir -p $(BUILD_DIR)/cmd/$*
CGO_ENABLED=0 $(GO) build -a -installsuffix cgo -o $(BUILD_DIR)/cmd/$*/run open-match.dev/open-match/cmd/$*
CGO_ENABLED=0 $(GO) build -v -installsuffix cgo -o $(BUILD_DIR)/cmd/$*/run open-match.dev/open-match/cmd/$*
# By default, nothing needs to be copied into the directory
build/cmd/%/COPY_PHONY:
@ -797,13 +839,13 @@ md-test: docker
ci-deploy-artifacts: install/yaml/ $(SWAGGER_JSON_DOCS) build/chart/ gcloud
ifeq ($(_GCB_POST_SUBMIT),1)
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* gs://open-match-chart/install/v$(BASE_VERSION)/yaml/
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json gs://open-match-chart/api/v$(BASE_VERSION)/
gsutil cp -a public-read $(REPOSITORY_ROOT)/install/yaml/* $(_CHARTS_BUCKET)/install/v$(BASE_VERSION)/yaml/
gsutil cp -a public-read $(REPOSITORY_ROOT)/api/*.json $(_CHARTS_BUCKET)/api/v$(BASE_VERSION)/
# Deploy Helm Chart
# Since each build refreshes just its version, we can allow this for every post submit.
# Copy the files into multiple locations to keep a backup.
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/by-hash/$(VERSION)/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* gs://open-match-chart/chart/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/by-hash/$(VERSION)/
gsutil cp -a public-read $(BUILD_DIR)/chart/*.* $(_CHARTS_BUCKET)/chart/
else
@echo "Not deploying build artifacts to open-match.dev because this is not a post commit change."
endif
@ -812,7 +854,7 @@ ci-reap-namespaces: build/toolchain/bin/reaper$(EXE_EXTENSION)
-$(TOOLCHAIN_BIN)/reaper -age=30m
# For presubmit we want to update the protobuf generated files and verify that tests are good.
presubmit: GOLANG_TEST_COUNT = 5
presubmit: GOLANG_TEST_COUNT = 3
presubmit: clean third_party/ update-chart-deps assets update-deps lint build test md-test terraform-test
build/release/: presubmit clean-install-yaml install/yaml/
@ -897,17 +939,13 @@ proxy-synchronizer: build/toolchain/bin/kubectl$(EXE_EXTENSION)
@echo "Synchronizer Trace: http://localhost:$(SYNCHRONIZER_PORT)/debug/tracez"
$(KUBECTL) port-forward --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(shell $(KUBECTL) get pod --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --selector="app=open-match,component=synchronizer,release=$(OPEN_MATCH_HELM_NAME)" --output jsonpath='{.items[0].metadata.name}') $(SYNCHRONIZER_PORT):51506 $(PORT_FORWARD_ADDRESS_FLAG)
proxy-jaeger: build/toolchain/bin/kubectl$(EXE_EXTENSION)
@echo "Jaeger Query Frontend: http://localhost:16686"
$(KUBECTL) port-forward --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(shell $(KUBECTL) get pod --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --selector="app.kubernetes.io/name=jaeger,app.kubernetes.io/component=query" --output jsonpath='{.items[0].metadata.name}') $(JAEGER_QUERY_PORT):16686 $(PORT_FORWARD_ADDRESS_FLAG)
proxy-grafana: build/toolchain/bin/kubectl$(EXE_EXTENSION)
@echo "User: admin"
@echo "Password: openmatch"
$(KUBECTL) port-forward --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(shell $(KUBECTL) get pod --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --selector="app=grafana,release=$(OPEN_MATCH_HELM_NAME)" --output jsonpath='{.items[0].metadata.name}') $(GRAFANA_PORT):3000 $(PORT_FORWARD_ADDRESS_FLAG)
$(KUBECTL) port-forward --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) service/$(shell $(KUBECTL) get service --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --selector="app.kubernetes.io/name=grafana" --output jsonpath='{.items[0].metadata.name}') $(GRAFANA_PORT):3000 $(PORT_FORWARD_ADDRESS_FLAG)
proxy-prometheus: build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) port-forward --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(shell $(KUBECTL) get pod --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --selector="app=prometheus,component=server,release=$(OPEN_MATCH_HELM_NAME)" --output jsonpath='{.items[0].metadata.name}') $(PROMETHEUS_PORT):9090 $(PORT_FORWARD_ADDRESS_FLAG)
$(KUBECTL) port-forward --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) service/$(shell $(KUBECTL) get service --namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE) --selector="app=prometheus,component=server,release=$(OPEN_MATCH_HELM_NAME)" --output jsonpath='{.items[0].metadata.name}') $(PROMETHEUS_PORT):80 $(PORT_FORWARD_ADDRESS_FLAG)
proxy-dashboard: build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) port-forward --namespace kube-system $(shell $(KUBECTL) get pod --namespace kube-system --selector="app=kubernetes-dashboard" --output jsonpath='{.items[0].metadata.name}') $(DASHBOARD_PORT):9092 $(PORT_FORWARD_ADDRESS_FLAG)
@ -922,7 +960,7 @@ proxy-demo: build/toolchain/bin/kubectl$(EXE_EXTENSION)
# Run `make proxy` instead to run everything at the same time.
# If you run this directly it will just run each proxy sequentially.
proxy-all: proxy-frontend proxy-backend proxy-query proxy-grafana proxy-prometheus proxy-jaeger proxy-synchronizer proxy-ui proxy-dashboard proxy-demo
proxy-all: proxy-frontend proxy-backend proxy-query proxy-grafana proxy-prometheus proxy-synchronizer proxy-ui proxy-dashboard proxy-demo
proxy:
# This is an exception case where we'll call recursive make.
@ -931,8 +969,9 @@ proxy:
update-deps:
$(GO) mod tidy
$(MAKE) tutorial-deps
third_party/: third_party/google/api third_party/protoc-gen-swagger/options third_party/swaggerui/
third_party/: third_party/google/api third_party/protoc-gen-openapiv2/options third_party/swaggerui/
third_party/google/api:
mkdir -p $(TOOLCHAIN_DIR)/googleapis-temp/
@ -944,12 +983,12 @@ third_party/google/api:
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
rm -rf $(TOOLCHAIN_DIR)/googleapis-temp
third_party/protoc-gen-swagger/options:
third_party/protoc-gen-openapiv2/options:
mkdir -p $(TOOLCHAIN_DIR)/grpc-gateway-temp/
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-openapiv2/options
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/v$(GRPC_GATEWAY_VERSION).zip
(cd $(TOOLCHAIN_DIR)/grpc-gateway-temp/; unzip -q -o grpc-gateway.zip)
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-openapiv2/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-openapiv2/options/
rm -rf $(TOOLCHAIN_DIR)/grpc-gateway-temp
third_party/swaggerui/:
@ -966,9 +1005,25 @@ third_party/swaggerui/:
$(SED_REPLACE) 's|0.0.0-dev|$(BASE_VERSION)|g' $(REPOSITORY_ROOT)/third_party/swaggerui/config.json
rm -rf $(TOOLCHAIN_DIR)/swaggerui-temp
sync-deps:
clean-deps:
$(GO) clean -modcache
$(GO) mod download
sync-deps: clean-deps
$(GO) mod tidy
$(GO) mod download -x
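# tutorial_folder recurses through a directory tree and runs `go mod tidy` in
# every Go module (go.mod) it finds; tutorial-deps below applies it to
# ./tutorials.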
define tutorial_folder
$(if $(wildcard $(1)/go.mod), \
cd $(1) && \
$(GO) mod tidy
)
$(foreach dir, $(wildcard $(1)/*/.), $(call tutorial_folder, $(dir)))
endef
tutorial-deps:
$(call tutorial_folder,./tutorials)
# Prevents users from running with sudo.
# There's an exception for Google Cloud Build because it runs as root.

@ -26,7 +26,7 @@ to Open Match.
## Support
* [Slack Channel](https://open-match.slack.com/) ([Signup](https://join.slack.com/t/open-match/shared_invite/enQtNDM1NjcxNTY4MTgzLTM5ZWQxNjc1YWI3MzJmN2RiMWJmYWI0ZjFiNzNkZmNkMWQ3YWU5OGVkNzA5Yzc4OGVkOGU5MTc0OTA5ZTA5NDU))
* [Slack Channel](https://open-match.slack.com/) ([Signup](https://join.slack.com/t/open-match/shared_invite/zt-5k57lph3-Oe0WdatzL32xv6tPG3PfzQ))
* [File an Issue](https://github.com/googleforgames/open-match/issues/new)
* [Mailing list](https://groups.google.com/forum/#!forum/open-match-discuss)

@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
info: {
title: "Backend"
version: "1.0"
@ -93,7 +93,7 @@ message ReleaseAllTicketsRequest{}
message ReleaseAllTicketsResponse {}
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
message AssignmentGroup{
message AssignmentGroup {
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
repeated string ticket_ids = 1;
@ -146,7 +146,6 @@ service BackendService {
// ReleaseTickets moves tickets from the pending state, to the active state.
// This enables them to be returned by query, and find different matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseTickets(ReleaseTicketsRequest) returns (ReleaseTicketsResponse) {
@ -159,7 +158,6 @@ service BackendService {
// ReleaseAllTickets moves all tickets from the pending state, to the active
// state. This enables them to be returned by query, and find different
// matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseAllTickets(ReleaseAllTicketsRequest) returns (ReleaseAllTicketsResponse) {
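Editor's aside: a minimal Go sketch, not part of this diff, of a director-style component calling the BackendService RPCs above. The backend address and port, the ticket ids, and the Assignment connection string are illustrative assumptions; the pb import path follows the generated-code location used by this repository's Makefile.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "open-match.dev/open-match/pkg/pb"
)

func main() {
	// Assumed in-cluster address/port of the Open Match backend service.
	conn, err := grpc.Dial("open-match-backend.open-match.svc.cluster.local:50505",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	be := pb.NewBackendServiceClient(conn)

	// An AssignmentGroup pairs one Assignment with the TicketIds it applies to.
	_, err = be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{{
			TicketIds:  []string{"ticket-1", "ticket-2"},            // hypothetical ids
			Assignment: &pb.Assignment{Connection: "10.0.0.1:7777"}, // assumed value
		}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Return every pending ticket to the active state (BETA, per the warning above).
	_, err = be.ReleaseAllTickets(context.Background(), &pb.ReleaseAllTicketsRequest{})
	if err != nil {
		log.Fatal(err)
	}
}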

@ -13,6 +13,11 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "BackendService"
}
],
"schemes": [
"http",
"https"
@ -27,12 +32,21 @@
"/v1/backendservice/matches:fetch": {
"post": {
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and\nreturns a set of matches generated by the Match Making Function, and\naccepted by the evaluator.\nTickets in matches returned by FetchMatches are moved from active to\npending, and will not be returned by query.",
"operationId": "FetchMatches",
"operationId": "BackendService_FetchMatches",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchFetchMatchesResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchFetchMatchesResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchFetchMatchesResponse"
}
},
"404": {
@ -41,6 +55,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -61,7 +81,7 @@
"/v1/backendservice/tickets:assign": {
"post": {
"summary": "AssignTickets overwrites the Assignment field of the input TicketIds.",
"operationId": "AssignTickets",
"operationId": "BackendService_AssignTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -75,6 +95,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -94,9 +120,8 @@
},
"/v1/backendservice/tickets:release": {
"post": {
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseTickets",
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "BackendService_ReleaseTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -110,6 +135,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -129,9 +160,8 @@
},
"/v1/backendservice/tickets:releaseall": {
"post": {
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseAllTickets",
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "BackendService_ReleaseAllTickets",
"responses": {
"200": {
"description": "A successful response.",
@ -145,6 +175,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -172,12 +208,24 @@
],
"default": "UNKNOWN"
},
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignTicketsRequest": {
"type": "object",
"properties": {
"assignments": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchAssignmentGroup"
},
"description": "Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied."
@ -190,6 +238,7 @@
"failures": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchAssignmentFailure"
},
"description": "Failures is a list of all the Tickets that failed assignment along with the cause of failure."
@ -242,6 +291,44 @@
},
"description": "AssignmentGroup contains an Assignment and the Tickets to which it should be applied."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a backfill. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -258,6 +345,10 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -326,6 +417,7 @@
"tickets": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTicket"
},
"description": "Tickets belonging to this match."
@ -336,6 +428,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -350,6 +450,7 @@
"pools": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchPool"
},
"description": "Set of pools to be queried when generating a match for this MatchProfile."
@ -374,6 +475,7 @@
"double_range_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
@ -381,12 +483,14 @@
"string_equals_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchStringEqualsFilter"
}
},
"tag_present_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
@ -496,6 +600,13 @@
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a ticket. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
@ -507,56 +618,36 @@
"protobufAny": {
"type": "object",
"properties": {
"type_url": {
"@type": {
"type": "string",
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
},
"value": {
"type": "string",
"format": "byte",
"description": "Must be a valid serialized protocol buffer of the above specified type."
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
}
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
"additionalProperties": {},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeStreamError": {
"rpcStatus": {
"type": "object",
"properties": {
"grpc_code": {
"code": {
"type": "integer",
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
},
"message": {
"type": "string"
},
"http_status": {
"type": "string"
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
},
"details": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/protobufAny"
}
}
}
}
},
"x-stream-definitions": {
"openmatchFetchMatchesResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchFetchMatchesResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
},
"title": "Stream result of openmatchFetchMatchesResponse"
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
}
},
"externalDocs": {

@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
info: {
title: "Evaluator"
version: "1.0"
@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
};
message EvaluateRequest {

@ -13,6 +13,11 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "Evaluator"
}
],
"schemes": [
"http",
"https"
@ -27,12 +32,21 @@
"/v1/evaluator/matches:evaluate": {
"post": {
"summary": "Evaluate evaluates a list of proposed matches based on quality, collision status, and etc, then shortlist the matches and returns the final results.",
"operationId": "Evaluate",
"operationId": "Evaluator_Evaluate",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchEvaluateResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchEvaluateResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchEvaluateResponse"
}
},
"404": {
@ -41,6 +55,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -78,6 +98,44 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a backfill. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchEvaluateRequest": {
"type": "object",
"properties": {
@ -114,6 +172,7 @@
"tickets": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTicket"
},
"description": "Tickets belonging to this match."
@ -124,6 +183,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -178,6 +245,13 @@
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a ticket. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
@ -189,56 +263,36 @@
"protobufAny": {
"type": "object",
"properties": {
"type_url": {
"@type": {
"type": "string",
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
},
"value": {
"type": "string",
"format": "byte",
"description": "Must be a valid serialized protocol buffer of the above specified type."
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
}
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
"additionalProperties": {},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeStreamError": {
"rpcStatus": {
"type": "object",
"properties": {
"grpc_code": {
"code": {
"type": "integer",
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
},
"message": {
"type": "string"
},
"http_status": {
"type": "string"
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
},
"details": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/protobufAny"
}
}
}
}
},
"x-stream-definitions": {
"openmatchEvaluateResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchEvaluateResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
},
"title": "Stream result of openmatchEvaluateResponse"
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
}
},
"externalDocs": {

@ -19,10 +19,10 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
import "google/protobuf/empty.proto";
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
info: {
title: "Frontend"
version: "1.0"
@ -53,7 +53,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
};
message CreateTicketRequest {
@ -81,6 +81,57 @@ message WatchAssignmentsResponse {
Assignment assignment = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message AcknowledgeBackfillRequest {
// An existing ID of Backfill to acknowledge.
string backfill_id = 1;
// An updated Assignment of the requested Backfill.
Assignment assignment = 2;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message AcknowledgeBackfillResponse {
// The Backfill that was acknowledged.
Backfill backfill = 1;
// All of the Tickets that were successfully assigned.
repeated Ticket tickets = 2;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message CreateBackfillRequest {
// An empty Backfill object.
Backfill backfill = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message DeleteBackfillRequest {
// An existing ID of Backfill to delete.
string backfill_id = 1;
}
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message GetBackfillRequest {
// An existing ID of Backfill to retrieve.
string backfill_id = 1;
}
// UpdateBackfillRequest - update searchFields, extensions and set assignment.
//
// BETA FEATURE WARNING: This Request message is not finalized and still subject
// to possible change or removal.
message UpdateBackfillRequest {
// A Backfill object with ID set and fields to update.
Backfill backfill = 1;
}
// The FrontendService implements APIs to manage and query the status of Tickets.
service FrontendService {
// CreateTicket assigns a unique TicketId to the input Ticket and records it in state storage.
@ -117,4 +168,55 @@ service FrontendService {
get: "/v1/frontendservice/tickets/{ticket_id}/assignments"
};
}
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info.
// This triggers an assignment process.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc AcknowledgeBackfill(AcknowledgeBackfillRequest) returns (AcknowledgeBackfillResponse) {
option (google.api.http) = {
post: "/v1/frontendservice/backfills/{backfill_id}/acknowledge"
body: "*"
};
}
// CreateBackfill creates a new Backfill object.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc CreateBackfill(CreateBackfillRequest) returns (Backfill) {
option (google.api.http) = {
post: "/v1/frontendservice/backfills"
body: "*"
};
}
// DeleteBackfill receives a backfill ID and deletes its resource.
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc DeleteBackfill(DeleteBackfillRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1/frontendservice/backfills/{backfill_id}"
};
}
// GetBackfill returns a backfill object by its ID.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc GetBackfill(GetBackfillRequest) returns (Backfill) {
option (google.api.http) = {
get: "/v1/frontendservice/backfills/{backfill_id}"
};
}
// UpdateBackfill updates search_fields and extensions for the backfill with the provided id.
// Any tickets waiting for this backfill will be returned to the active pool, no longer pending.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc UpdateBackfill(UpdateBackfillRequest) returns (Backfill) {
option (google.api.http) = {
patch: "/v1/frontendservice/backfills"
body: "*"
};
}
}
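Taken together, the new RPCs above form a backfill lifecycle: something creates a Backfill, the game server acknowledges it with its connection info, and it is eventually deleted. A sketch of the create/acknowledge steps against the generated Go client, assuming the published open-match.dev/open-match/pkg/pb package and a default in-cluster frontend address (both are placeholders to adjust for your deployment):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Endpoint is illustrative; use your cluster's om-frontend address.
	conn, err := grpc.Dial("om-frontend.open-match.svc.cluster.local:50504",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	fe := pb.NewFrontendServiceClient(conn)
	ctx := context.Background()

	// CreateBackfill: the Backfill is sent without an ID; Open Match issues one.
	bf, err := fe.CreateBackfill(ctx, &pb.CreateBackfillRequest{
		Backfill: &pb.Backfill{
			SearchFields: &pb.SearchFields{Tags: []string{"mode.ctf"}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// AcknowledgeBackfill: the game server reports its connection string,
	// which triggers assignment of the tickets pending on this backfill.
	ack, err := fe.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{
		BackfillId: bf.Id,
		Assignment: &pb.Assignment{Connection: "10.0.0.1:7777"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("backfill %s acknowledged, %d tickets assigned", ack.Backfill.Id, len(ack.Tickets))
}
```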

@ -13,6 +13,11 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "FrontendService"
}
],
"schemes": [
"http",
"https"
@ -24,10 +29,221 @@
"application/json"
],
"paths": {
"/v1/frontendservice/backfills": {
"post": {
"summary": "CreateBackfill creates a new Backfill object.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_CreateBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "body",
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal.",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchCreateBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
},
"patch": {
"summary": "UpdateBackfill updates search_fields and extensions for the backfill with the provided id.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_UpdateBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "body",
"description": "UpdateBackfillRequest - update searchFields, extensions and set assignment.\n\nBETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal.",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchUpdateBackfillRequest"
}
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/backfills/{backfill_id}": {
"get": {
"summary": "GetBackfill returns a backfill object by its ID.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_GetBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchBackfill"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "backfill_id",
"description": "An existing ID of Backfill to retrieve.",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"FrontendService"
]
},
"delete": {
"summary": "DeleteBackfill receives a backfill ID and deletes its resource.\nAny tickets waiting for this backfill will be returned to the active pool, no longer pending.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_DeleteBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"type": "object",
"properties": {}
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "backfill_id",
"description": "An existing ID of Backfill to delete.",
"in": "path",
"required": true,
"type": "string"
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/backfills/{backfill_id}/acknowledge": {
"post": {
"summary": "AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info\nThis triggers an assignment process.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "FrontendService_AcknowledgeBackfill",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchAcknowledgeBackfillResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "backfill_id",
"description": "An existing ID of Backfill to acknowledge.",
"in": "path",
"required": true,
"type": "string"
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"type": "object",
"properties": {
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Backfill."
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
}
}
],
"tags": [
"FrontendService"
]
}
},
"/v1/frontendservice/tickets": {
"post": {
"summary": "CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.\nA ticket is considered as ready for matchmaking once it is created.\n - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.\n - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.",
"operationId": "CreateTicket",
"operationId": "FrontendService_CreateTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -41,6 +257,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -61,7 +283,7 @@
"/v1/frontendservice/tickets/{ticket_id}": {
"get": {
"summary": "GetTicket get the Ticket associated with the specified TicketId.",
"operationId": "GetTicket",
"operationId": "FrontendService_GetTicket",
"responses": {
"200": {
"description": "A successful response.",
@ -75,6 +297,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -92,11 +320,12 @@
},
"delete": {
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client should delete the Ticket when finished matchmaking with it.",
"operationId": "DeleteTicket",
"operationId": "FrontendService_DeleteTicket",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"type": "object",
"properties": {}
}
},
@ -106,6 +335,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -125,12 +360,21 @@
"/v1/frontendservice/tickets/{ticket_id}/assignments": {
"get": {
"summary": "WatchAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "WatchAssignments",
"operationId": "FrontendService_WatchAssignments",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchWatchAssignmentsResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchWatchAssignmentsResponse"
}
},
"404": {
@ -139,6 +383,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -157,6 +407,24 @@
}
},
"definitions": {
"openmatchAcknowledgeBackfillResponse": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "The Backfill that was acknowledged."
},
"tickets": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTicket"
},
"title": "All of the Tickets that were successfully assigned"
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -174,6 +442,54 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a backfill. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchCreateBackfillRequest": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "An empty Backfill object."
}
},
"description": "BETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchCreateTicketRequest": {
"type": "object",
"properties": {
@ -233,6 +549,13 @@
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a ticket. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
@ -241,6 +564,16 @@
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"openmatchUpdateBackfillRequest": {
"type": "object",
"properties": {
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "A Backfill object with ID set and fields to update."
}
},
"description": "UpdateBackfillRequest - update searchFields, extensions and set assignment.\n\nBETA FEATURE WARNING: This Request message is not finalized and still subject\nto possible change or removal."
},
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
@ -253,56 +586,36 @@
"protobufAny": {
"type": "object",
"properties": {
"type_url": {
"@type": {
"type": "string",
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
},
"value": {
"type": "string",
"format": "byte",
"description": "Must be a valid serialized protocol buffer of the above specified type."
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
}
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
"additionalProperties": {},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeStreamError": {
"rpcStatus": {
"type": "object",
"properties": {
"grpc_code": {
"code": {
"type": "integer",
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
},
"message": {
"type": "string"
},
"http_status": {
"type": "string"
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
},
"details": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/protobufAny"
-}
-}
-}
-}
-},
-"x-stream-definitions": {
-"openmatchWatchAssignmentsResponse": {
-"type": "object",
-"properties": {
-"result": {
-"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
-},
-"error": {
-"$ref": "#/definitions/runtimeStreamError"
+},
+"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
},
"title": "Stream result of openmatchWatchAssignmentsResponse"
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
}
},
"externalDocs": {

@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
info: {
title: "Match Function"
version: "1.0"
@ -69,8 +69,9 @@ message RunResponse {
// The MatchFunction service implements APIs to run user-defined matchmaking logics.
service MatchFunction {
// DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.
-// Run pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then
-// constructs and streams back match candidates to the Backend service.
+// Run pulls Tickets that satisfy Profile constraints from QueryService,
+// runs matchmaking logic against them, then constructs and streams back
+// match candidates to the Backend service.
rpc Run(RunRequest) returns (stream RunResponse) {
option (google.api.http) = {
post: "/v1/matchfunction:run"

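Run is a server-streaming RPC, so a match function emits proposals one RunResponse at a time rather than returning a single message. A minimal sketch of the server side, assuming pb types generated by protoc-gen-go-grpc from the proto above (the QueryService lookup a real MMF performs is elided):

```go
package mmf

import (
	"open-match.dev/open-match/pkg/pb"
)

// matchFunctionService implements the MatchFunction service defined above.
type matchFunctionService struct {
	pb.UnimplementedMatchFunctionServer
}

// Run builds proposals for the requested profile and streams each one back
// to the backend as its own RunResponse.
func (s *matchFunctionService) Run(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
	profile := req.GetProfile()

	// A real MMF queries tickets from QueryService using profile.Pools;
	// a single empty proposal stands in for that logic here.
	proposal := &pb.Match{
		MatchId:       "proposal-" + profile.GetName(),
		MatchProfile:  profile.GetName(),
		MatchFunction: "example-mmf",
	}
	return stream.Send(&pb.RunResponse{Proposal: proposal})
}
```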
@ -13,6 +13,11 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "MatchFunction"
}
],
"schemes": [
"http",
"https"
@ -26,13 +31,22 @@
"paths": {
"/v1/matchfunction:run": {
"post": {
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisfy Profile constraints from QueryService, runs matchmaking logics against them, then\nconstructs and streams back match candidates to the Backend service.",
"operationId": "Run",
"summary": "DO NOT CALL THIS FUNCTION MANUALLY. USE backend.FetchMatches INSTEAD.\nRun pulls Tickets that satisfy Profile constraints from QueryService,\nruns matchmaking logic against them, then constructs and streams back\nmatch candidates to the Backend service.",
"operationId": "MatchFunction_Run",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchRunResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchRunResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchRunResponse"
}
},
"404": {
@ -41,6 +55,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -60,6 +80,17 @@
}
},
"definitions": {
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -77,6 +108,44 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a backfill. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -93,6 +162,10 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -115,6 +188,7 @@
"tickets": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTicket"
},
"description": "Tickets belonging to this match."
@ -125,6 +199,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"backfill": {
"$ref": "#/definitions/openmatchBackfill",
"description": "Backfill request which contains additional information to the match\nand contains an association to a GameServer.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
},
"allocate_gameserver": {
"type": "boolean",
"description": "AllocateGameServer signalise Director that Backfill is new and it should \nallocate a GameServer, this Backfill would be assigned.\nBETA FEATURE WARNING: This field is not finalized and still subject\nto possible change or removal."
}
},
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
@ -139,6 +221,7 @@
"pools": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchPool"
},
"description": "Set of pools to be queried when generating a match for this MatchProfile."
@ -163,6 +246,7 @@
"double_range_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
@ -170,12 +254,14 @@
"string_equals_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchStringEqualsFilter"
}
},
"tag_present_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
@ -282,6 +368,13 @@
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a ticket. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
@ -293,56 +386,36 @@
"protobufAny": {
"type": "object",
"properties": {
"type_url": {
"@type": {
"type": "string",
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
},
"value": {
"type": "string",
"format": "byte",
"description": "Must be a valid serialized protocol buffer of the above specified type."
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
}
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
"additionalProperties": {},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeStreamError": {
"rpcStatus": {
"type": "object",
"properties": {
"grpc_code": {
"code": {
"type": "integer",
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
},
"message": {
"type": "string"
},
"http_status": {
"type": "string"
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
},
"details": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/protobufAny"
-}
-}
-}
-}
-},
-"x-stream-definitions": {
-"openmatchRunResponse": {
-"type": "object",
-"properties": {
-"result": {
-"$ref": "#/definitions/openmatchRunResponse"
-},
-"error": {
-"$ref": "#/definitions/runtimeStreamError"
+},
+"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
},
"title": "Stream result of openmatchRunResponse"
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
}
},
"externalDocs": {

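Through the HTTP gateway the same Run stream arrives as newline-delimited JSON chunks in the envelope shown above, and the request must carry a JSON Content-Type. A hedged sketch of a raw REST call (host, port, and profile body are placeholders, not a documented endpoint):

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder endpoint; the MMF's HTTP port depends on your deployment.
	url := "http://mmf.example.com:51502/v1/matchfunction:run"
	body := bytes.NewBufferString(`{"profile":{"name":"demo"}}`)

	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		log.Fatal(err)
	}
	// The gateway expects a JSON body; see the swagger above.
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Each line is one {"result": ...} or {"error": ...} chunk.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```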
@ -45,9 +45,14 @@ message Ticket {
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 5;
// Customized information not inspected by Open Match, to be kept persistent
// throughout the life-cycle of a ticket.
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> persistent_field = 6;
// Create time is the time the Ticket was created. It is populated by Open
// Match at the time of Ticket creation.
-google.protobuf.Timestamp create_time = 6;
+google.protobuf.Timestamp create_time = 7;
// Deprecated fields.
reserved 2;
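persistent_field, like extensions, is a map of google.protobuf.Any, so values are wrapped via the anypb helpers before being attached to a Ticket. A small sketch, assuming the generated open-match.dev/open-match/pkg/pb package (the wrapped skill value is only an example):

```go
package main

import (
	"log"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Wrap an arbitrary message; wrapperspb covers plain scalar values.
	skill, err := anypb.New(wrapperspb.Double(2150.0))
	if err != nil {
		log.Fatal(err)
	}

	// persistent_field survives the whole ticket life-cycle, unlike data
	// that match functions may rewrite in extensions.
	ticket := &pb.Ticket{
		PersistentField: map[string]*anypb.Any{"skill": skill},
	}
	log.Printf("ticket carries %d persistent fields", len(ticket.PersistentField))
}
```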
@ -103,6 +108,25 @@ message DoubleRangeFilter {
// Minimum value.
double min = 3;
enum Exclude {
// No bounds should be excluded when evaluating the filter, i.e.: MIN <= x <= MAX
NONE = 0;
// Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN < x <= MAX
MIN = 1;
// Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN <= x < MAX
MAX = 2;
// Both bounds should be excluded when evaluating the filter, i.e.: MIN < x < MAX
BOTH = 3;
}
// Defines the bounds to apply when filtering tickets by their search_fields.double_args value.
// BETA FEATURE WARNING: This field and the associated values are
// not finalized and still subject to possible change or removal.
Exclude exclude = 4;
}
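The Exclude enum relaxes the previously closed interval, which makes adjacent brackets on the same double_arg possible without double-counting the shared bound. A sketch under the generated-Go naming convention for nested enums (pool names and the rating argument are made up):

```go
package pools

import (
	"open-match.dev/open-match/pkg/pb"
)

// bracketPools splits one rating scale into two adjacent pools; excluding the
// minimum bound in the upper pool keeps a ticket at exactly 1000 out of it.
func bracketPools() []*pb.Pool {
	lower := &pb.Pool{
		Name: "rating-0-1000",
		DoubleRangeFilters: []*pb.DoubleRangeFilter{{
			DoubleArg: "rating",
			Min:       0,
			Max:       1000, // Exclude defaults to NONE: MIN <= x <= MAX
		}},
	}
	upper := &pb.Pool{
		Name: "rating-1000-2000",
		DoubleRangeFilters: []*pb.DoubleRangeFilter{{
			DoubleArg: "rating",
			Min:       1000,
			Max:       2000,
			Exclude:   pb.DoubleRangeFilter_MIN, // MIN < x <= MAX
		}},
	}
	return []*pb.Pool{lower, upper}
}
```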
// Filters strings exactly equaling a value.
@ -201,6 +225,50 @@ message Match {
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 7;
// Backfill request which contains additional information to the match
// and contains an association to a GameServer.
// BETA FEATURE WARNING: This field is not finalized and still subject
// to possible change or removal.
Backfill backfill = 8;
// AllocateGameServer signals the Director that the Backfill is new and it should
// allocate a GameServer to which this Backfill will be assigned.
// BETA FEATURE WARNING: This field is not finalized and still subject
// to possible change or removal.
bool allocate_gameserver = 9;
// Deprecated fields.
reserved 5, 6;
}
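With these two fields, a match function can attach a fresh Backfill to a partially filled proposal and signal the Director to allocate a server for it. A sketch, again assuming the generated pb package (IDs and names are placeholders):

```go
package mmf

import (
	"open-match.dev/open-match/pkg/pb"
)

// proposeWithBackfill builds a proposal that is short on players: the embedded
// Backfill advertises the open slots, and allocate_gameserver tells the
// Director this Backfill is new and a GameServer should be allocated for it.
func proposeWithBackfill(profile *pb.MatchProfile, tickets []*pb.Ticket) *pb.Match {
	return &pb.Match{
		MatchId:            "backfill-" + profile.GetName(),
		MatchProfile:       profile.GetName(),
		MatchFunction:      "basic-backfill",
		Tickets:            tickets,
		Backfill:           &pb.Backfill{SearchFields: &pb.SearchFields{Tags: []string{"mode.ctf"}}},
		AllocateGameserver: true,
	}
}
```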
// Represents a backfill entity which is used to fill partially full matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
message Backfill {
// Id represents an auto-generated Id issued by Open Match.
string id = 1;
// Search fields are the fields which Open Match is aware of, and can be used
// when specifying filters.
SearchFields search_fields = 2;
// Customized information not inspected by Open Match, to be used by
// the Match Function, evaluator, and components making calls to Open Match.
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 3;
// Customized information not inspected by Open Match, to be kept persistent
// throughout the life-cycle of a backfill.
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> persistent_field = 4;
// Create time is the time the Backfill was created. It is populated by Open
// Match at the time of Backfill creation.
google.protobuf.Timestamp create_time = 5;
// Generation gets incremented on GameServers update operations.
// Prevents the MMF from overriding a newer version from the game server.
// Do NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs.
int64 generation = 6;
}

@ -19,9 +19,9 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
info: {
title: "MM Logic (Data Layer)"
version: "1.0"
@ -52,7 +52,7 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
}
// TODO Add annotations for security_definitions.
// See
-// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/proto/examplepb/a_bit_of_everything.proto
+// https://github.com/grpc-ecosystem/grpc-gateway/blob/master/examples/internal/proto/examplepb/a_bit_of_everything.proto
};
message QueryTicketsRequest {
@ -75,6 +75,20 @@ message QueryTicketIdsResponse {
repeated string ids = 1;
}
// BETA FEATURE WARNING: These Request messages are not finalized and
// still subject to possible change or removal.
message QueryBackfillsRequest {
// The Pool representing the set of Filters to be queried.
Pool pool = 1;
}
// BETA FEATURE WARNING: These Request messages are not finalized and
// still subject to possible change or removal.
message QueryBackfillsResponse {
// Backfills that meet all the filtering criteria requested by the pool.
repeated Backfill backfills = 1;
}
// The QueryService service implements helper APIs for Match Function to query Tickets from state storage.
service QueryService {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
@ -98,4 +112,14 @@ service QueryService {
body: "*"
};
}
// QueryBackfills gets a list of Backfills.
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc QueryBackfills(QueryBackfillsRequest) returns (stream QueryBackfillsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/backfills:query"
body: "*"
};
}
}
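QueryBackfills streams results the same way QueryTickets does, so a client loops on Recv until io.EOF. A sketch against the generated Go client (address and pool are placeholders):

```go
package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Endpoint is illustrative; use your cluster's om-query address.
	conn, err := grpc.Dial("om-query.open-match.svc.cluster.local:50503",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	qs := pb.NewQueryServiceClient(conn)
	stream, err := qs.QueryBackfills(context.Background(), &pb.QueryBackfillsRequest{
		Pool: &pb.Pool{Name: "everything"}, // no filters: all backfills
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("got %d backfills", len(resp.Backfills))
	}
}
```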

@ -13,6 +13,11 @@
"url": "https://github.com/googleforgames/open-match/blob/master/LICENSE"
}
},
"tags": [
{
"name": "QueryService"
}
],
"schemes": [
"http",
"https"
@ -24,15 +29,24 @@
"application/json"
],
"paths": {
"/v1/queryservice/ticketids:query": {
"/v1/queryservice/backfills:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryTicketIds",
"summary": "QueryBackfills gets a list of Backfills.\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "QueryService_QueryBackfills",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchQueryTicketIdsResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryBackfillsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchQueryBackfillsResponse"
}
},
"404": {
@ -41,6 +55,62 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "body",
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal.",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchQueryBackfillsRequest"
}
}
],
"tags": [
"QueryService"
]
}
},
"/v1/queryservice/ticketids:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryService_QueryTicketIds",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -61,12 +131,21 @@
"/v1/queryservice/tickets:query": {
"post": {
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryTickets",
"operationId": "QueryService_QueryTickets",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchQueryTicketsResponse"
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketsResponse"
},
"error": {
"$ref": "#/definitions/rpcStatus"
}
},
"title": "Stream result of openmatchQueryTicketsResponse"
}
},
"404": {
@ -75,6 +154,12 @@
"type": "string",
"format": "string"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
@ -94,6 +179,17 @@
}
},
"definitions": {
"DoubleRangeFilterExclude": {
"type": "string",
"enum": [
"NONE",
"MIN",
"MAX",
"BOTH"
],
"default": "NONE",
"title": "- NONE: No bounds should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c= MAX\n - MIN: Only the minimum bound should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c= MAX\n - MAX: Only the maximum bound should be excluded when evaluating the filter, i.e.: MIN \u003c= x \u003c MAX\n - BOTH: Both bounds should be excluded when evaluating the filter, i.e.: MIN \u003c x \u003c MAX"
},
"openmatchAssignment": {
"type": "object",
"properties": {
@ -111,6 +207,44 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchBackfill": {
"type": "object",
"properties": {
"id": {
"type": "string",
"description": "Id represents an auto-generated Id issued by Open Match."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
},
"extensions": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by\nthe Match Function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a backfill. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
},
"generation": {
"type": "string",
"format": "int64",
"description": "Generation gets incremented on GameServers update operations.\nPrevents the MMF from overriding a newer version from the game server.\nDo NOT read or write to this field, it is for internal tracking, and changing the value will cause bugs."
}
},
"description": "Represents a backfill entity which is used to fill partially full matches.\n\nBETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -127,6 +261,10 @@
"type": "number",
"format": "double",
"description": "Minimum value."
},
"exclude": {
"$ref": "#/definitions/DoubleRangeFilterExclude",
"description": "Defines the bounds to apply when filtering tickets by their search_fields.double_args value.\nBETA FEATURE WARNING: This field and the associated values are\nnot finalized and still subject to possible change or removal."
}
},
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
@ -141,6 +279,7 @@
"double_range_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
@ -148,12 +287,14 @@
"string_equals_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchStringEqualsFilter"
}
},
"tag_present_filters": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
@ -170,6 +311,30 @@
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchQueryBackfillsRequest": {
"type": "object",
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
}
},
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
},
"openmatchQueryBackfillsResponse": {
"type": "object",
"properties": {
"backfills": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchBackfill"
},
"description": "Backfills that meet all the filtering criteria requested by the pool."
}
},
"description": "BETA FEATURE WARNING: This Request messages are not finalized and \nstill subject to possible change or removal."
},
"openmatchQueryTicketIdsRequest": {
"type": "object",
"properties": {
@ -206,6 +371,7 @@
"tickets": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/openmatchTicket"
},
"description": "Tickets that meet all the filtering criteria requested by the pool."
@ -284,6 +450,13 @@
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"persistent_field": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be kept persistent \nthroughout the life-cycle of a ticket. \nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
@ -295,68 +468,36 @@
"protobufAny": {
"type": "object",
"properties": {
"type_url": {
"@type": {
"type": "string",
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
},
"value": {
"type": "string",
"format": "byte",
"description": "Must be a valid serialized protocol buffer of the above specified type."
"description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics."
}
},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
"additionalProperties": {},
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
},
"runtimeStreamError": {
"rpcStatus": {
"type": "object",
"properties": {
"grpc_code": {
"code": {
"type": "integer",
"format": "int32"
},
"http_code": {
"type": "integer",
"format": "int32"
"format": "int32",
"description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]."
},
"message": {
"type": "string"
},
"http_status": {
"type": "string"
"type": "string",
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client."
},
"details": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/protobufAny"
}
}
}
}
},
"x-stream-definitions": {
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
},
"description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use."
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
},
"openmatchQueryTicketsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketsResponse"
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)."
}
},
"externalDocs": {

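As a quick, hedged illustration of the definitions above (a sketch, not part of the generated swagger file): assuming the generated Go bindings in open-match.dev/open-match/pkg/pb follow the usual protoc naming, a Pool using the documented DoubleRangeFilter semantics and the QueryBackfills request that wraps it could be built like this; the pool name is hypothetical.

package main

import (
	"fmt"

	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Matches tickets whose search_fields.double_args["foo"] lies in [5, 10],
	// per the DoubleRangeFilter example in the definition above.
	pool := &pb.Pool{
		Name: "example-pool", // hypothetical name, for illustration only
		DoubleRangeFilters: []*pb.DoubleRangeFilter{
			{DoubleArg: "foo", Min: 5, Max: 10},
		},
	}

	// QueryBackfillsRequest carries the same Pool message as its filter.
	req := &pb.QueryBackfillsRequest{Pool: pool}
	fmt.Println(req)
}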
@ -16,15 +16,9 @@
# Open Match Script for Google Cloud Build #
################################################################################
# To run this locally:
# cloud-build-local --config=cloudbuild.yaml --dryrun=false --substitutions=_OM_VERSION=DEV .
# To run this remotely:
# example command-line invocation:
# gcloud builds submit --config=cloudbuild.yaml --substitutions=_OM_VERSION=DEV .
# Requires gcloud to be installed to work. (https://cloud.google.com/sdk/)
# gcloud auth login
# gcloud components install cloud-build-local
# This YAML contains all the build steps for building Open Match.
# All PRs are verified against this script to prevent build breakages and regressions.
@ -38,8 +32,11 @@
# Setup: Read-Write, similar to generate but steps that run before any other step.
# Some useful things to know about Cloud Build.
# The root of this repository is always stored in /workspace.
# Any modifications that occur within /workspace are persisted between builds; anything else is forgotten.
# When your build executes, Cloud Build copies the contents of your repository to /workspace, the default working directory for Cloud Build.
# Learn more about working directories on the Build configuration overview page https://cloud.google.com/build/docs/build-config-file-schema
# - Modifications that occur within /workspace are persisted between build steps.
# - If you want to replicate the build process from this file locally, you'll need to
#   clone the Open Match GitHub repo and set HEAD to the commit you're trying to build.
# If a build step has intermediate files that need to be persisted for a future step, then use volumes.
# An example of this is go-vol, which is where the pkg/ data for go mod is stored.
# More information here: https://cloud.google.com/cloud-build/docs/build-config#build_steps
@ -48,23 +45,31 @@
steps:
- id: 'Docker Image: open-match-build'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/open-match-build', '-f', 'Dockerfile.ci', '.']
name: gcr.io/kaniko-project/executor:latest
args: [
"--destination=gcr.io/$PROJECT_ID/open-match-build",
"--dockerfile=Dockerfile.ci",
"--cache=true",
"--cache-ttl=3600h",
]
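# Kaniko builds the CI image without a Docker daemon and caches layers in the
# registry, so unchanged Dockerfile.ci steps are skipped on later builds until
# the cache entries exceed --cache-ttl.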
waitFor: ['-']
#name: gcr.io/cloud-builders/docker
#args: ['build', '-t', 'gcr.io/$PROJECT_ID/open-match-build', '-f', 'Dockerfile.ci', '.']
#waitFor: ['-']
- id: 'Build: Clean'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'clean-third-party', 'clean-protos', 'clean-swagger-docs']
waitFor: ['Docker Image: open-match-build']
# - id: 'Test: Markdown'
# name: 'gcr.io/$PROJECT_ID/open-match-build'
# args: ['make', 'md-test']
# waitFor: ['Build: Clean']
- id: 'Setup: Download Dependencies'
- id: 'Test: Markdown'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'sync-deps']
args: ['make', 'md-test']
waitFor: ['Build: Clean']
- id: 'Setup: Clean Go Dependencies'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'clean-deps']
volumes:
- name: 'go-vol'
path: '/go'
@ -76,21 +81,37 @@ steps:
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Download Dependencies']
waitFor: ['Setup: Clean Go Dependencies']
- id: 'Test: Terraform Configuration'
- id: 'Build: Compile Protos'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'terraform-test']
args: ['make', 'golang-protos']
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Build: Initialize Toolchain']
- id: 'Setup: Download Go Dependencies'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'sync-deps']
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Build: Compile Protos']
- id: 'Build: Deployment Configs'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'SHORT_SHA=${SHORT_SHA}', 'update-chart-deps', 'install/yaml/']
waitFor: ['Build: Initialize Toolchain']
waitFor: ['Setup: Download Go Dependencies']
- id: 'Test: Terraform Configuration'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'terraform-test']
waitFor: ['Setup: Download Go Dependencies']
- id: 'Build: Assets'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'assets', '-j12']
args: ['make', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'assets', '-j12']
volumes:
- name: 'go-vol'
path: '/go'
@ -98,7 +119,7 @@ steps:
- id: 'Build: Binaries'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'GOPROXY=off', 'build', 'all', '-j12']
args: ['make', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'build', 'all', '-j12']
volumes:
- name: 'go-vol'
path: '/go'
@ -106,7 +127,9 @@ steps:
- id: 'Test: Services'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', 'GOPROXY=off', 'GOLANG_TEST_COUNT=10', 'test']
args: ['make', 'GOLANG_TEST_COUNT=3', 'test']
# When debugging failing tests, enable verbose 'go test' output, run additional passes
#args: ['make', 'GOLANG_EXTRA_TEST_FLAGS=-v', 'GOLANG_TEST_COUNT=10', 'test']
volumes:
- name: 'go-vol'
path: '/go'
@ -132,7 +155,7 @@ steps:
- id: 'Deploy: Deployment Configs'
name: 'gcr.io/$PROJECT_ID/open-match-build'
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', 'ci-deploy-artifacts']
args: ['make', '_GCB_POST_SUBMIT=${_GCB_POST_SUBMIT}', '_GCB_LATEST_VERSION=${_GCB_LATEST_VERSION}', 'SHORT_SHA=${SHORT_SHA}', 'BRANCH_NAME=${BRANCH_NAME}', '_CHARTS_BUCKET=${_CHARTS_BUCKET}', 'ci-deploy-artifacts']
waitFor: ['Lint: Format, Vet, Charts', 'Test: Deploy Open Match']
volumes:
- name: 'go-vol'
@ -153,22 +176,21 @@ steps:
artifacts:
objects:
location: '${_ARTIFACTS_BUCKET}'
location: '${_ARTIFACTS_BUCKET}${_OM_VERSION}'
paths:
- install/yaml/install.yaml
- install/yaml/01-open-match-core.yaml
- install/yaml/02-open-match-demo.yaml
- install/yaml/03-prometheus-chart.yaml
- install/yaml/04-grafana-chart.yaml
- install/yaml/05-jaeger-chart.yaml
- install/yaml/06-open-match-override-configmap.yaml
- install/yaml/*.yaml
- pkg/pb/*.pb.go
- pkg/pb/*.pb.gw.go
- internal/ipb/*.pb.go
- api/*.swagger.json
substitutions:
_OM_VERSION: "0.0.0-dev"
_OM_VERSION: "1.8.1"
_GCB_POST_SUBMIT: "0"
_GCB_LATEST_VERSION: "undefined"
_ARTIFACTS_BUCKET: "gs://open-match-build-artifacts/output/"
_ARTIFACTS_BUCKET: "gs://open-match-build-artifacts/"
_LOGS_BUCKET: "gs://open-match-build-logs/"
_CHARTS_BUCKET: "gs://open-match-chart"
logsBucket: '${_LOGS_BUCKET}'
options:
sourceProvenanceHash: ['SHA256']

@ -46,7 +46,7 @@ make
*Typically for contributing you'll want to
[create a fork](https://help.github.com/en/articles/fork-a-repo) and use that
but for the purpose of this guide we'll be using the upstream/master.*
but for the purpose of this guide we'll be using the upstream/main.*
## Building code and images

docs/hugo_apiheader.txt Normal file

@ -0,0 +1,7 @@
---
title: "Open Match API References"
linkTitle: "Open Match API References"
weight: 2
description:
This document provides API references for Open Match services.
---

@ -37,7 +37,7 @@ func New() *ByteSub {
}
}
// AnnounceLatest writes b to all of the subscribers, with caviets listed in Subscribe.
// AnnounceLatest writes b to all of the subscribers, with caveats listed in Subscribe.
func (s *ByteSub) AnnounceLatest(b []byte) {
s.r.Lock()
defer s.r.Unlock()

@ -51,7 +51,7 @@ func TestFastAndSlow(t *testing.T) {
for count := 0; true; count++ {
if v := <-slow; v == "3" {
if count > 1 {
t.Error("Expected to recieve at most 1 other value on slow before recieving the latest value.")
t.Error("Expected to receive at most 1 other value on slow before receiving the latest value.")
}
break
}

@ -37,7 +37,7 @@ type Updater struct {
type SetFunc func(v interface{})
// New creates an Updater. Set is called when fields update, using the json
// sererialized value of Updater's tree. All updates after ctx is canceled are
// serialized value of Updater's tree. All updates after ctx is canceled are
// ignored.
func New(ctx context.Context, set func([]byte)) *Updater {
f := func(v interface{}) {

@ -0,0 +1,24 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM open-match-base-build as builder
WORKDIR /go/src/open-match.dev/open-match/examples/functions/golang/backfill
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o matchfunction .
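# Distroless static base image: no shell or package manager, and the binary runs as a non-root user.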
FROM gcr.io/distroless/static:nonroot
WORKDIR /app/
COPY --from=builder --chown=nonroot /go/src/open-match.dev/open-match/examples/functions/golang/backfill/matchfunction /app/
ENTRYPOINT ["/app/matchfunction"]

@ -0,0 +1,33 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main defines a sample match function that uses the GRPC harness to set up
// the match making function as a service. This sample is a reference
// to demonstrate the usage of the GRPC harness and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package main
import (
"open-match.dev/open-match/examples/functions/golang/backfill/mmf"
)
const (
queryServiceAddr = "open-match-query.open-match.svc.cluster.local:50503" // Address of the QueryService endpoint.
serverPort = 50502 // The port for hosting the Match Function.
)
func main() {
mmf.Start(queryServiceAddr, serverPort)
}

@ -0,0 +1,297 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mmf provides a sample match function that uses the GRPC harness to set up 1v1 matches.
// This sample is a reference to demonstrate the usage of backfill and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package mmf
import (
"fmt"
"time"
"log"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
const (
playersPerMatch = 2
openSlotsKey = "open-slots"
matchName = "backfill-matchfunction"
)
// matchFunctionService implements pb.MatchFunctionServer, the server generated
// by compiling the protobuf, by fulfilling the pb.MatchFunctionServer interface.
type matchFunctionService struct {
grpc *grpc.Server
queryServiceClient pb.QueryServiceClient
port int
}
func (s *matchFunctionService) Run(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
log.Printf("Generating proposals for function %v", req.GetProfile().GetName())
var proposals []*pb.Match
profile := req.GetProfile()
pools := profile.GetPools()
for _, p := range pools {
tickets, err := matchfunction.QueryPool(stream.Context(), s.queryServiceClient, p)
if err != nil {
log.Printf("Failed to query tickets for the given pool, got %s", err.Error())
return err
}
backfills, err := matchfunction.QueryBackfillPool(stream.Context(), s.queryServiceClient, p)
if err != nil {
log.Printf("Failed to query backfills for the given pool, got %s", err.Error())
return err
}
matches, err := makeMatches(profile, p, tickets, backfills)
if err != nil {
log.Printf("Failed to generate matches, got %s", err.Error())
return err
}
proposals = append(proposals, matches...)
}
log.Printf("Streaming %v proposals to Open Match", len(proposals))
// Stream the generated proposals back to Open Match.
for _, proposal := range proposals {
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
log.Printf("Failed to stream proposals to Open Match, got %s", err.Error())
return err
}
}
return nil
}
// makeMatches handles backfills first, then makes full matches, and finally
// makes a match with a new backfill if any tickets are left.
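// For example, with playersPerMatch = 2, five tickets, and one backfill with a
// single open slot: handleBackfills produces one match (consuming one ticket),
// makeFullMatches produces two full matches (consuming four), and no tickets
// remain, so no extra backfill match is created.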
func makeMatches(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, backfills []*pb.Backfill) ([]*pb.Match, error) {
var matches []*pb.Match
newMatches, remainingTickets, err := handleBackfills(profile, tickets, backfills, len(matches))
if err != nil {
return nil, err
}
matches = append(matches, newMatches...)
newMatches, remainingTickets = makeFullMatches(profile, remainingTickets, len(matches))
matches = append(matches, newMatches...)
if len(remainingTickets) > 0 {
match, err := makeMatchWithBackfill(profile, pool, remainingTickets, len(matches))
if err != nil {
return nil, err
}
matches = append(matches, match)
}
return matches, nil
}
// handleBackfills looks at each backfill's openSlots, which is the number of tickets still required,
// acquires that many tickets, decreases openSlots on the backfill, and makes a match from the updated backfill and the associated tickets.
func handleBackfills(profile *pb.MatchProfile, tickets []*pb.Ticket, backfills []*pb.Backfill, lastMatchId int) ([]*pb.Match, []*pb.Ticket, error) {
matchId := lastMatchId
var matches []*pb.Match
for _, b := range backfills {
openSlots, err := getOpenSlots(b)
if err != nil {
return nil, tickets, err
}
var matchTickets []*pb.Ticket
for openSlots > 0 && len(tickets) > 0 {
matchTickets = append(matchTickets, tickets[0])
tickets = tickets[1:]
openSlots--
}
if len(matchTickets) > 0 {
err := setOpenSlots(b, openSlots)
if err != nil {
return nil, tickets, err
}
matchId++
match := newMatch(matchId, profile.Name, matchTickets, b)
matches = append(matches, &match)
}
}
return matches, tickets, nil
}
// makeMatchWithBackfill makes a partially full match and creates a backfill for it with openSlots = playersPerMatch-len(tickets).
func makeMatchWithBackfill(profile *pb.MatchProfile, pool *pb.Pool, tickets []*pb.Ticket, lastMatchId int) (*pb.Match, error) {
if len(tickets) == 0 {
return nil, fmt.Errorf("tickets are required")
}
if len(tickets) >= playersPerMatch {
return nil, fmt.Errorf("too many tickets")
}
matchId := lastMatchId
searchFields := newSearchFields(pool)
backfill, err := newBackfill(searchFields, playersPerMatch-len(tickets))
if err != nil {
return nil, err
}
matchId++
match := newMatch(matchId, profile.Name, tickets, backfill)
// indicates that it is a new match and new game server should be allocated for it
match.AllocateGameserver = true
return &match, nil
}
// makeFullMatches makes matches without backfill
func makeFullMatches(profile *pb.MatchProfile, tickets []*pb.Ticket, lastMatchId int) ([]*pb.Match, []*pb.Ticket) {
ticketNum := 0
matchId := lastMatchId
var matches []*pb.Match
for ticketNum < playersPerMatch && len(tickets) >= playersPerMatch {
ticketNum++
if ticketNum == playersPerMatch {
matchId++
match := newMatch(matchId, profile.Name, tickets[:playersPerMatch], nil)
matches = append(matches, &match)
tickets = tickets[playersPerMatch:]
ticketNum = 0
}
}
return matches, tickets
}
// newSearchFields creates search fields based on the pool's search criteria. This is just an example of how it can be done.
func newSearchFields(pool *pb.Pool) *pb.SearchFields {
searchFields := pb.SearchFields{}
rangeFilters := pool.GetDoubleRangeFilters()
if rangeFilters != nil {
doubleArgs := make(map[string]float64)
for _, f := range rangeFilters {
doubleArgs[f.DoubleArg] = f.Min + (f.Max-f.Min)/2 // midpoint of the range, so the backfill satisfies its own pool filter
}
if len(doubleArgs) > 0 {
searchFields.DoubleArgs = doubleArgs
}
}
stringFilters := pool.GetStringEqualsFilters()
if stringFilters != nil {
stringArgs := make(map[string]string)
for _, f := range stringFilters {
stringArgs[f.StringArg] = f.Value
}
if len(stringArgs) > 0 {
searchFields.StringArgs = stringArgs
}
}
tagFilters := pool.GetTagPresentFilters()
if tagFilters != nil {
tags := make([]string, 0, len(tagFilters)) // zero length, so the append below doesn't leave empty leading entries
for _, f := range tagFilters {
tags = append(tags, f.Tag)
}
if len(tags) > 0 {
searchFields.Tags = tags
}
}
return &searchFields
}
func newBackfill(searchFields *pb.SearchFields, openSlots int) (*pb.Backfill, error) {
b := pb.Backfill{
SearchFields: searchFields,
Generation: 0,
CreateTime: timestamppb.Now(),
}
err := setOpenSlots(&b, int32(openSlots))
return &b, err
}
func newMatch(num int, profile string, tickets []*pb.Ticket, b *pb.Backfill) pb.Match {
t := time.Now().Format("2006-01-02T15:04:05.00")
return pb.Match{
MatchId: fmt.Sprintf("profile-%s-time-%s-num-%d", profile, t, num),
MatchProfile: profile,
MatchFunction: matchName,
Tickets: tickets,
Backfill: b,
}
}
func setOpenSlots(b *pb.Backfill, val int32) error {
if b.Extensions == nil {
b.Extensions = make(map[string]*anypb.Any)
}
any, err := anypb.New(&wrapperspb.Int32Value{Value: val})
if err != nil {
return err
}
b.Extensions[openSlotsKey] = any
return nil
}
func getOpenSlots(b *pb.Backfill) (int32, error) {
if b == nil {
return 0, fmt.Errorf("expected backfill is not nil")
}
if b.Extensions != nil {
if any, ok := b.Extensions[openSlotsKey]; ok {
var val wrapperspb.Int32Value
err := any.UnmarshalTo(&val)
if err != nil {
return 0, err
}
return val.Value, nil
}
}
return playersPerMatch, nil
}
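// A minimal usage sketch of the helpers above (hypothetical, not called from
// this file): create a backfill with three open slots, read the value back,
// and record that one slot was consumed.
//
//	b, _ := newBackfill(&pb.SearchFields{}, 3)
//	n, _ := getOpenSlots(b) // n == 3
//	_ = setOpenSlots(b, n-1)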

@ -0,0 +1,141 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mmf
import (
"testing"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/wrapperspb"
"open-match.dev/open-match/pkg/pb"
)
func TestHandleBackfills(t *testing.T) {
for _, tc := range []struct {
name string
tickets []*pb.Ticket
backfills []*pb.Backfill
lastMatchId int
expectedMatchLen int
expectedTicketLen int
expectedOpenSlots int32
expectedErr bool
}{
{name: "returns no matches when no backfills specified", expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns no matches when no tickets specified", expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns a match with open slots decreased", tickets: []*pb.Ticket{{Id: "1"}}, backfills: []*pb.Backfill{withOpenSlots(1)}, expectedMatchLen: 1, expectedTicketLen: 0, expectedOpenSlots: playersPerMatch - 2},
} {
testCase := tc
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
profile := pb.MatchProfile{Name: "matchProfile"}
matches, tickets, err := handleBackfills(&profile, testCase.tickets, testCase.backfills, testCase.lastMatchId)
require.Equal(t, testCase.expectedErr, err != nil)
require.Equal(t, testCase.expectedTicketLen, len(tickets))
if err != nil {
require.Equal(t, 0, len(matches))
} else {
for _, m := range matches {
require.NotNil(t, m.Backfill)
openSlots, err := getOpenSlots(m.Backfill)
require.NoError(t, err)
require.Equal(t, testCase.expectedOpenSlots, openSlots)
}
}
})
}
}
func TestMakeMatchWithBackfill(t *testing.T) {
for _, testCase := range []struct {
name string
tickets []*pb.Ticket
lastMatchId int
expectedOpenSlots int32
expectedErr bool
}{
{name: "returns an error when length of tickets is greater then playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}, {Id: "5"}}, expectedErr: true},
{name: "returns an error when length of tickets is equal to playerPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}, {Id: "3"}, {Id: "4"}}, expectedErr: true},
{name: "returns an error when no tickets are provided", expectedErr: true},
{name: "returns a match with backfill", tickets: []*pb.Ticket{{Id: "1"}}, expectedOpenSlots: playersPerMatch - 1},
} {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
pool := pb.Pool{}
profile := pb.MatchProfile{Name: "matchProfile"}
match, err := makeMatchWithBackfill(&profile, &pool, testCase.tickets, testCase.lastMatchId)
require.Equal(t, testCase.expectedErr, err != nil)
if err == nil {
require.NotNil(t, match)
require.NotNil(t, match.Backfill)
require.True(t, match.AllocateGameserver)
require.Equal(t, "", match.Backfill.Id)
openSlots, err := getOpenSlots(match.Backfill)
require.Nil(t, err)
require.Equal(t, testCase.expectedOpenSlots, openSlots)
}
})
}
}
func TestMakeFullMatches(t *testing.T) {
for _, testCase := range []struct {
name string
tickets []*pb.Ticket
lastMatchId int
expectedMatchLen int
expectedTicketLen int
}{
{name: "returns no matches when there are no tickets", tickets: []*pb.Ticket{}, expectedMatchLen: 0, expectedTicketLen: 0},
{name: "returns no matches when length of tickets is less then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}}, expectedMatchLen: 0, expectedTicketLen: 1},
{name: "returns a match when length of tickets is greater then playersPerMatch", tickets: []*pb.Ticket{{Id: "1"}, {Id: "2"}}, expectedMatchLen: 1, expectedTicketLen: 0},
} {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
profile := pb.MatchProfile{Name: "matchProfile"}
matches, tickets := makeFullMatches(&profile, testCase.tickets, testCase.lastMatchId)
require.Equal(t, testCase.expectedMatchLen, len(matches))
require.Equal(t, testCase.expectedTicketLen, len(tickets))
for _, m := range matches {
require.Nil(t, m.Backfill)
require.Equal(t, playersPerMatch, len(m.Tickets))
}
})
}
}
func withOpenSlots(openSlots int) *pb.Backfill {
val, err := anypb.New(&wrapperspb.Int32Value{Value: int32(openSlots)})
if err != nil {
panic(err)
}
return &pb.Backfill{
Extensions: map[string]*anypb.Any{
openSlotsKey: val,
},
}
}

@ -0,0 +1,59 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mmf provides a sample match function that uses the GRPC harness to set up 1v1 matches.
// This sample is a reference to demonstrate the usage of backfill and should only be used as
// a starting point for your match function. You will need to modify the
// matchmaking logic in this function based on your game's requirements.
package mmf
import (
"fmt"
"log"
"net"
"google.golang.org/grpc"
"open-match.dev/open-match/pkg/pb"
)
func Start(queryServiceAddr string, serverPort int) {
// Connect to QueryService.
conn, err := grpc.Dial(queryServiceAddr, grpc.WithInsecure())
if err != nil {
log.Fatalf("Failed to connect to Open Match, got %s", err.Error())
}
defer conn.Close()
mmfService := matchFunctionService{
queryServiceClient: pb.NewQueryServiceClient(conn),
}
// Create and host a new gRPC service on the configured port.
server := grpc.NewServer()
pb.RegisterMatchFunctionServer(server, &mmfService)
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", serverPort))
if err != nil {
log.Fatalf("TCP net listener initialization failed for port %v, got %s", serverPort, err.Error())
}
log.Printf("TCP net listener initialized for port %v", serverPort)
err = server.Serve(ln)
if err != nil {
log.Fatalf("gRPC serve failed, got %s", err.Error())
}
}

@ -40,16 +40,16 @@ var (
activeScenario = scenarios.ActiveScenario
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
mFetchMatchSuccesses = telemetry.Counter("scale_backend_fetch_match_successes", "fetch match successes")
mFetchMatchErrors = telemetry.Counter("scale_backend_fetch_match_errors", "fetch match errors")
mMatchesReturned = telemetry.Counter("scale_backend_matches_returned", "matches returned")
mSumTicketsReturned = telemetry.Counter("scale_backend_sum_tickets_returned", "tickets in matches returned")
mMatchesAssigned = telemetry.Counter("scale_backend_matches_assigned", "matches assigned")
mMatchAssignsFailed = telemetry.Counter("scale_backend_match_assigns_failed", "match assigns failed")
mTicketsDeleted = telemetry.Counter("scale_backend_tickets_deleted", "tickets deleted")
mTicketDeletesFailed = telemetry.Counter("scale_backend_ticket_deletes_failed", "ticket deletes failed")
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
mFetchMatchSuccesses = telemetry.Counter("scale_backend_fetch_match_successes", "fetch match successes")
mFetchMatchErrors = telemetry.Counter("scale_backend_fetch_match_errors", "fetch match errors")
mMatchesReturned = telemetry.Counter("scale_backend_matches_returned", "matches returned")
mSumTicketsReturned = telemetry.Counter("scale_backend_sum_tickets_returned", "tickets in matches returned")
mMatchesAssigned = telemetry.Counter("scale_backend_matches_assigned", "matches assigned")
mMatchAssignsFailed = telemetry.Counter("scale_backend_match_assigns_failed", "match assigns failed")
mBackfillsDeleted = telemetry.Counter("scale_backend_backfills_deleted", "backfills deleted")
mBackfillDeletesFailed = telemetry.Counter("scale_backend_backfill_deletes_failed", "backfill deletes failed")
)
// Run triggers execution of functions that continuously fetch, assign and
@ -79,12 +79,28 @@ func run(cfg config.View) {
w := logger.Writer()
defer w.Close()
matchesForAssignment := make(chan *pb.Match, 30000)
ticketsForDeletion := make(chan string, 30000)
matchesToAssign := make(chan *pb.Match, 30000)
for i := 0; i < 50; i++ {
go runAssignments(be, matchesForAssignment, ticketsForDeletion)
go runDeletions(fe, ticketsForDeletion)
if activeScenario.BackendAssignsTickets {
for i := 0; i < 100; i++ {
go runAssignments(be, matchesToAssign)
}
}
backfillsToDelete := make(chan *pb.Backfill, 30000)
if activeScenario.BackendDeletesBackfills {
for i := 0; i < 100; i++ {
go runDeleteBackfills(fe, backfillsToDelete)
}
}
matchesToAcknowledge := make(chan *pb.Match, 30000)
if activeScenario.BackendAcknowledgesBackfills {
for i := 0; i < 100; i++ {
go runAcknowledgeBackfills(fe, matchesToAcknowledge, backfillsToDelete)
}
}
// Don't go faster than this, as it likely means that FetchMatches is throwing
@ -98,7 +114,7 @@ func run(cfg config.View) {
wg.Add(1)
go func(wg *sync.WaitGroup, p *pb.MatchProfile) {
defer wg.Done()
runFetchMatches(be, p, matchesForAssignment)
runFetchMatches(be, p, matchesToAssign, matchesToAcknowledge)
}(&wg, p)
}
@ -108,13 +124,13 @@ func run(cfg config.View) {
}
}
func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForAssignment chan<- *pb.Match) {
func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesToAssign chan<- *pb.Match, matchesToAcknowledge chan<- *pb.Match) {
ctx, span := trace.StartSpan(context.Background(), "scale.backend/FetchMatches")
defer span.End()
req := &pb.FetchMatchesRequest{
Config: &pb.FunctionConfig{
Host: "om-function",
Host: "open-match-function",
Port: 50502,
Type: pb.FunctionConfig_GRPC,
},
@ -146,62 +162,90 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA
telemetry.RecordNUnitMeasurement(ctx, mSumTicketsReturned, int64(len(resp.GetMatch().Tickets)))
telemetry.RecordUnitMeasurement(ctx, mMatchesReturned)
matchesForAssignment <- resp.GetMatch()
if activeScenario.BackendAssignsTickets {
matchesToAssign <- resp.GetMatch()
}
if activeScenario.BackendAcknowledgesBackfills {
matchesToAcknowledge <- resp.GetMatch()
}
}
}
func runAssignments(be pb.BackendServiceClient, matchesForAssignment <-chan *pb.Match, ticketsForDeletion chan<- string) {
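// runDeleteBackfills drains backfillsToDelete and, for each backfill that
// satisfies the scenario's BackfillDeleteCond, deletes it via the Frontend.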
func runDeleteBackfills(fe pb.FrontendServiceClient, backfillsToDelete <-chan *pb.Backfill) {
for b := range backfillsToDelete {
if !activeScenario.BackfillDeleteCond(b) {
continue
}
ctx := context.Background()
_, err := fe.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: b.Id})
if err != nil {
logger.WithError(err).Errorf("failed to delete backfill: %s", b.Id)
telemetry.RecordUnitMeasurement(ctx, mBackfillDeletesFailed)
} else {
telemetry.RecordUnitMeasurement(ctx, mBackfillsDeleted)
}
}
}
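// runAcknowledgeBackfills acknowledges the backfill attached to each fetched
// match (standing in for a game server reporting it is ready for players) and,
// when the scenario also deletes backfills, forwards them for deletion.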
func runAcknowledgeBackfills(fe pb.FrontendServiceClient, matchesToAcknowledge <-chan *pb.Match, backfillsToDelete chan<- *pb.Backfill) {
for m := range matchesToAcknowledge {
backfillId := m.Backfill.GetId()
if backfillId == "" {
continue
}
err := acknowledgeBackfill(fe, backfillId)
if err != nil {
logger.WithError(err).Errorf("failed to acknowledge backfill: %s", backfillId)
continue
}
if activeScenario.BackendDeletesBackfills {
backfillsToDelete <- m.Backfill
}
}
}
func acknowledgeBackfill(fe pb.FrontendServiceClient, backfillId string) error {
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/AcknowledgeBackfill")
defer span.End()
_, err := fe.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{
BackfillId: backfillId,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
})
return err
}
func runAssignments(be pb.BackendServiceClient, matchesToAssign <-chan *pb.Match) {
ctx := context.Background()
for m := range matchesForAssignment {
for m := range matchesToAssign {
ids := []string{}
for _, t := range m.Tickets {
ids = append(ids, t.GetId())
}
if activeScenario.BackendAssignsTickets {
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
},
})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
logger.WithError(err).Error("failed to assign tickets")
continue
}
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
},
})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
logger.WithError(err).Error("failed to assign tickets")
continue
}
for _, id := range ids {
ticketsForDeletion <- id
}
}
}
func runDeletions(fe pb.FrontendServiceClient, ticketsForDeletion <-chan string) {
ctx := context.Background()
for id := range ticketsForDeletion {
if activeScenario.BackendDeletesTickets {
req := &pb.DeleteTicketRequest{
TicketId: id,
}
_, err := fe.DeleteTicket(context.Background(), req)
if err == nil {
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
} else {
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
logger.WithError(err).Error("failed to delete tickets")
}
}
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
}
}

@ -38,12 +38,22 @@ var (
})
activeScenario = scenarios.ActiveScenario
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "tickets created")
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "tickets created")
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
mTicketsDeleted = telemetry.Counter("scale_frontend_tickets_deleted", "tickets deleted")
mTicketDeletesFailed = telemetry.Counter("scale_frontend_ticket_deletes_failed", "ticket deletes failed")
mBackfillsCreated = telemetry.Counter("scale_frontend_backfills_created", "backfills_created")
mBackfillCreationsFailed = telemetry.Counter("scale_frontend_backfill_creations_failed", "backfill creations failed")
mTicketsTimeToAssignment = telemetry.HistogramWithBounds("scale_frontend_tickets_time_to_assignment", "tickets time to assignment", stats.UnitMilliseconds, []float64{0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000})
)
type ticketToWatch struct {
id string
createdAt time.Time
}
// Run triggers execution of the scale frontend component that creates
// tickets at scale in Open Match.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
@ -61,9 +71,12 @@ func run(cfg config.View) {
}
fe := pb.NewFrontendServiceClient(conn)
if activeScenario.FrontendCreatesBackfillsOnStart {
createBackfills(fe, activeScenario.FrontendTotalBackfillsToCreate)
}
ticketQPS := int(activeScenario.FrontendTicketCreatedQPS)
ticketTotal := activeScenario.FrontendTotalTicketsToCreate
totalCreated := 0
for range time.Tick(time.Second) {
@ -89,13 +102,27 @@ func runner(fe pb.FrontendServiceClient) {
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
g.start(mRunnersCreating)
createdAt := time.Now()
id, err := createTicket(ctx, fe)
if err != nil {
logger.WithError(err).Error("failed to create a ticket")
return
}
_ = id
err = watchAssignments(ctx, fe, ticketToWatch{id: id, createdAt: createdAt})
if err != nil {
logger.WithError(err).Errorf("failed to get ticket assignment: %s", id)
} else {
ms := time.Since(createdAt).Nanoseconds() / 1e6
stats.Record(ctx, mTicketsTimeToAssignment.M(ms))
}
if activeScenario.FrontendDeletesTickets {
err = deleteTicket(ctx, fe, id)
if err != nil {
logger.WithError(err).Errorf("failed to delete ticket: %s", id)
}
}
}
func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, error) {
@ -116,6 +143,68 @@ func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, err
return resp.Id, nil
}
func watchAssignments(ctx context.Context, fe pb.FrontendServiceClient, ticket ticketToWatch) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := fe.WatchAssignments(ctx, &pb.WatchAssignmentsRequest{TicketId: ticket.id})
if err != nil {
return err
}
var a *pb.Assignment
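// Generated proto getters are nil-safe, so the first GetConnection() call on
// the still-nil Assignment simply returns "".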
for a.GetConnection() == "" {
resp, err := stream.Recv()
if err != nil {
return err
}
a = resp.Assignment
}
return nil
}
func createBackfills(fe pb.FrontendServiceClient, numBackfillsToCreate int) error {
for i := 0; i < numBackfillsToCreate; i++ {
err := createBackfill(fe)
if err != nil {
return err
}
}
return nil
}
func createBackfill(fe pb.FrontendServiceClient) error {
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/CreateBackfill")
defer span.End()
req := pb.CreateBackfillRequest{
Backfill: activeScenario.Backfill(),
}
_, err := fe.CreateBackfill(ctx, &req)
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mBackfillCreationsFailed)
logger.WithError(err).Error("failed to create backfill")
return err
}
telemetry.RecordUnitMeasurement(ctx, mBackfillsCreated)
return nil
}
func deleteTicket(ctx context.Context, fe pb.FrontendServiceClient, ticketId string) error {
_, err := fe.DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticketId})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
} else {
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
}
return err
}
// Allows concurrent modification of a gauge value by modifying the current
// value with a delta.
func concurrentGauge(s *stats.Int64Measure) func(delta int64) {

@ -0,0 +1,270 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package backfill
import (
"fmt"
"io"
"time"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/wrapperspb"
"open-match.dev/open-match/pkg/pb"
)
const (
poolName = "all"
openSlotsKey = "open-slots"
)
func Scenario() *BackfillScenario {
ticketsPerMatch := 4
return &BackfillScenario{
TicketsPerMatch: ticketsPerMatch,
MaxTicketsPerNotFullMatch: 3,
BackfillDeleteCond: func(b *pb.Backfill) bool {
openSlots := getOpenSlots(b, ticketsPerMatch)
return openSlots <= 0
},
}
}
type BackfillScenario struct {
TicketsPerMatch int
MaxTicketsPerNotFullMatch int
BackfillDeleteCond func(*pb.Backfill) bool
}
func (s *BackfillScenario) Profiles() []*pb.MatchProfile {
return []*pb.MatchProfile{
{
Name: "entirePool",
Pools: []*pb.Pool{
{
Name: poolName,
},
},
},
}
}
func (s *BackfillScenario) Ticket() *pb.Ticket {
return &pb.Ticket{}
}
func (s *BackfillScenario) Backfill() *pb.Backfill {
return &pb.Backfill{}
}
func (s *BackfillScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
return statefullMMF(p, poolBackfills, poolTickets, s.TicketsPerMatch, s.MaxTicketsPerNotFullMatch)
}
// statefullMMF is an MMF implementation used in a scenario where we want the MMF to create a partially full match and fill it up later.
// 1. The first FetchMatches is called
// 2. MMF grabs maxTicketsPerNotFullMatch tickets and makes a match and new backfill for it
// 3. MMF sets backfill's open slots to ticketsPerMatch - maxTicketsPerNotFullMatch
// 4. MMF returns the match as a result
// 5. The second FetchMatches is called
// 6. MMF gets previously created backfill
// 7. MMF gets backfill's open slots value
// 8. MMF grabs openSlots tickets and makes a match with previously created backfill
// 9. MMF sets backfill's open slots to 0
// 10. MMF returns the match as a result
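// With the values from Scenario above (ticketsPerMatch = 4,
// maxTicketsPerNotFullMatch = 3) and three waiting tickets, the first call
// yields a 3-ticket match whose new backfill advertises one open slot; a later
// call with one more ticket fills that slot and sets open slots to 0.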
func statefullMMF(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket, ticketsPerMatch int, maxTicketsPerNotFullMatch int) ([]*pb.Match, error) {
var matches []*pb.Match
for pool, backfills := range poolBackfills {
tickets, ok := poolTickets[pool]
if !ok || len(tickets) == 0 {
// no tickets in pool
continue
}
// process backfills first
for _, b := range backfills {
l := len(tickets)
if l == 0 {
// no tickets left
break
}
openSlots := getOpenSlots(b, ticketsPerMatch)
if openSlots <= 0 {
// no free open slots
continue
}
if l > openSlots {
l = openSlots
}
setOpenSlots(b, openSlots-l)
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[0:l],
MatchProfile: p.GetName(),
MatchFunction: "backfill",
Backfill: b,
})
tickets = tickets[l:]
}
// create not full matches with backfill
for {
l := len(tickets)
if l == 0 {
// no tickets left
break
}
if l > maxTicketsPerNotFullMatch {
l = maxTicketsPerNotFullMatch
}
b := pb.Backfill{}
setOpenSlots(&b, ticketsPerMatch-l)
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[0:l],
MatchProfile: p.GetName(),
MatchFunction: "backfill",
Backfill: &b,
AllocateGameserver: true,
})
tickets = tickets[l:]
}
}
return matches, nil
}
func getOpenSlots(b *pb.Backfill, defaultVal int) int {
if b.Extensions == nil {
return defaultVal
}
any, ok := b.Extensions[openSlotsKey]
if !ok {
return defaultVal
}
var val wrapperspb.Int32Value
err := any.UnmarshalTo(&val)
if err != nil {
panic(err)
}
return int(val.Value)
}
func setOpenSlots(b *pb.Backfill, val int) {
if b.Extensions == nil {
b.Extensions = make(map[string]*anypb.Any)
}
any, err := anypb.New(&wrapperspb.Int32Value{Value: int32(val)})
if err != nil {
panic(err)
}
b.Extensions[openSlotsKey] = any
}
// statelessMMF is an MMF implementation used in a scenario where we want the MMF to fill backfills created by a game server. It doesn't create
// or update any backfill.
// 1. FetchMatches is called
// 2. MMF gets a backfill
// 3. MMF grabs ticketsPerMatch tickets and makes a match with the backfill
// 4. MMF returns the match as a result
func statelessMMF(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket, ticketsPerMatch int) ([]*pb.Match, error) {
var matches []*pb.Match
for pool, backfills := range poolBackfills {
tickets, ok := poolTickets[pool]
if !ok || len(tickets) == 0 {
// no tickets in pool
continue
}
for _, b := range backfills {
l := len(tickets)
if l == 0 {
// no tickets left
break
}
if l > ticketsPerMatch && ticketsPerMatch > 0 {
l = ticketsPerMatch
}
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[0:l],
MatchProfile: p.GetName(),
MatchFunction: "backfill",
Backfill: b,
})
tickets = tickets[l:]
}
}
return matches, nil
}
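// Evaluate accepts the first proposal to claim each ticket or backfill and
// rejects any later proposal that reuses one of them.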
func (s *BackfillScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
tickets := map[string]struct{}{}
backfills := map[string]struct{}{}
matchIds := []string{}
outer:
for {
req, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read evaluator input stream: %w", err)
}
m := req.GetMatch()
if bfId := m.GetBackfill().GetId(); bfId != "" {
if _, ok := backfills[bfId]; ok {
continue outer
}
backfills[bfId] = struct{}{} // remember the backfill so later proposals reusing it are rejected
}
for _, t := range m.Tickets {
if _, ok := tickets[t.Id]; ok {
continue outer
}
}
for _, t := range m.Tickets {
tickets[t.Id] = struct{}{}
}
matchIds = append(matchIds, m.GetMatchId())
}
for _, id := range matchIds {
err := stream.Send(&pb.EvaluateResponse{MatchId: id})
if err != nil {
return fmt.Errorf("failed to sending evaluator output stream: %w", err)
}
}
return nil
}

@ -78,7 +78,11 @@ func (b *BattleRoyalScenario) Ticket() *pb.Ticket {
}
}
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func (b *BattleRoyalScenario) Backfill() *pb.Backfill {
return nil
}
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
const playersInMatch = 100
tickets := poolTickets[poolName]
@ -101,7 +105,7 @@ func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[
func (b *BattleRoyalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and recieving at the
// TODO: once the evaluator client supports sending and receiving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}

@ -33,7 +33,7 @@ func Scenario() *FirstMatchScenario {
type FirstMatchScenario struct {
}
func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
func (*FirstMatchScenario) Profiles() []*pb.MatchProfile {
return []*pb.MatchProfile{
{
Name: "entirePool",
@ -46,11 +46,15 @@ func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
}
}
func (_ *FirstMatchScenario) Ticket() *pb.Ticket {
func (*FirstMatchScenario) Ticket() *pb.Ticket {
return &pb.Ticket{}
}
func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func (*FirstMatchScenario) Backfill() *pb.Backfill {
return nil
}
func (*FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
tickets := poolTickets[poolName]
var matches []*pb.Match
@ -68,10 +72,10 @@ func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[s
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
// previously accepted match. Essentially first to claim the ticket wins.
func (_ *FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
func (*FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and recieving at the
// TODO: once the evaluator client supports sending and receiving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}

@ -19,9 +19,8 @@ import (
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"open-match.dev/open-match/examples/scale/scenarios/battleroyal"
"open-match.dev/open-match/examples/scale/scenarios/backfill"
"open-match.dev/open-match/examples/scale/scenarios/firstmatch"
"open-match.dev/open-match/examples/scale/scenarios/teamshooter"
"open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
@ -40,11 +39,14 @@ type GameScenario interface {
// Ticket creates a new ticket, with randomized parameters.
Ticket() *pb.Ticket
// Backfill creates a new backfill, with randomized parameters.
Backfill() *pb.Backfill
// Profiles lists all of the profiles that should run.
Profiles() []*pb.MatchProfile
// MatchFunction is the custom logic implementation of the match function.
MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
// Evaluate is the custom logic implementation of the evaluator.
Evaluate(stream pb.Evaluator_EvaluateServer) error
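For orientation (not part of the upstream diff): a minimal sketch of what a scenario must now provide to satisfy this interface. Only the method set and signatures come from the hunk above; the package name, pool name, and pairing logic are illustrative.

package minimal

import (
	"fmt"
	"io"

	"open-match.dev/open-match/pkg/pb"
)

type MinimalScenario struct{}

func (*MinimalScenario) Ticket() *pb.Ticket { return &pb.Ticket{} }

// Backfill may return nil for scenarios that don't exercise backfill,
// as the battleroyal and firstmatch scenarios above do.
func (*MinimalScenario) Backfill() *pb.Backfill { return nil }

func (*MinimalScenario) Profiles() []*pb.MatchProfile {
	return []*pb.MatchProfile{{Name: "everyone", Pools: []*pb.Pool{{Name: "all"}}}}
}

// MatchFunction pairs tickets two at a time, ignoring backfills.
func (*MinimalScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
	var matches []*pb.Match
	tickets := poolTickets["all"]
	for i := 0; i+1 < len(tickets); i += 2 {
		matches = append(matches, &pb.Match{
			MatchId:       fmt.Sprintf("pair-%s-%s", tickets[i].Id, tickets[i+1].Id),
			MatchProfile:  p.GetName(),
			MatchFunction: "pairs",
			Tickets:       tickets[i : i+2],
		})
	}
	return matches, nil
}

// Evaluate accepts every proposal by echoing its match ID back.
func (*MinimalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err := stream.Send(&pb.EvaluateResponse{MatchId: req.GetMatch().GetMatchId()}); err != nil {
			return err
		}
	}
}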
@ -56,18 +58,26 @@ var ActiveScenario = func() *Scenario {
// TODO: Select which scenario to use based on some configuration or choice,
// so it's easier to run different scenarios without changing code.
gs = battleroyal.Scenario()
gs = teamshooter.Scenario()
//gs = battleroyal.Scenario()
//gs = teamshooter.Scenario()
s := backfill.Scenario()
gs = s
return &Scenario{
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
FrontendCreatesBackfillsOnStart: true,
FrontendTotalBackfillsToCreate: 1000,
FrontendDeletesTickets: true,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
BackendAssignsTickets: false,
BackendAcknowledgesBackfills: true,
BackendDeletesBackfills: true,
Ticket: gs.Ticket,
Profiles: gs.Profiles,
Ticket: gs.Ticket,
Backfill: gs.Backfill,
BackfillDeleteCond: s.BackfillDeleteCond,
Profiles: gs.Profiles,
MMF: queryPoolsWrapper(gs.MatchFunction),
Evaluator: gs.Evaluate,
@ -87,17 +97,23 @@ type Scenario struct {
// TicketExtensionSize int
// PendingTicketNumber int
// MatchExtensionSize int
FrontendTotalTicketsToCreate int // TotalTicketsToCreate = -1 let scale-frontend create tickets forever
FrontendTicketCreatedQPS uint32
FrontendTicketCreatedQPS uint32
FrontendTotalTicketsToCreate int // TotalTicketsToCreate = -1 let scale-frontend create tickets forever
FrontendTotalBackfillsToCreate int
FrontendCreatesBackfillsOnStart bool
FrontendDeletesTickets bool
// GameBackend Configs
// ProfileNumber int
// FilterNumber int
BackendAssignsTickets bool
BackendDeletesTickets bool
BackendAssignsTickets bool
BackendAcknowledgesBackfills bool
BackendDeletesBackfills bool
Ticket func() *pb.Ticket
Profiles func() []*pb.MatchProfile
Ticket func() *pb.Ticket
Backfill func() *pb.Backfill
BackfillDeleteCond func(*pb.Backfill) bool
Profiles func() []*pb.MatchProfile
MMF matchFunction
Evaluator evaluatorFunction
@ -122,7 +138,7 @@ func getQueryServiceGRPCClient() pb.QueryServiceClient {
return pb.NewQueryServiceClient(conn)
}
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
var q pb.QueryServiceClient
var startQ sync.Once
@ -136,7 +152,12 @@ func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Tic
return err
}
proposals, err := mmf(req.GetProfile(), poolTickets)
poolBackfills, err := matchfunction.QueryBackfillPools(stream.Context(), q, req.GetProfile().GetPools())
if err != nil {
return err
}
proposals, err := mmf(req.GetProfile(), poolBackfills, poolTickets)
if err != nil {
return err
}
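Not from the diff: a hedged sketch of an MMF written against the new signature, showing one way the poolBackfills argument supplied by queryPoolsWrapper can be used, namely attaching waiting tickets to existing backfills before forming fresh matches. Capacity tracking is elided; field names follow pb.Backfill and pb.Match as used elsewhere in this changeset.

package example

import (
	"github.com/rs/xid"

	"open-match.dev/open-match/pkg/pb"
)

// backfillFirstMMF assigns one waiting ticket to each open backfill in a
// pool before any new matches are formed. A real MMF would also track the
// remaining capacity of each backfill (e.g. via SearchFields or Extensions).
func backfillFirstMMF(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
	var matches []*pb.Match
	for pool, backfills := range poolBackfills {
		tickets := poolTickets[pool]
		for _, b := range backfills {
			if len(tickets) == 0 {
				break
			}
			matches = append(matches, &pb.Match{
				MatchId:       xid.New().String(),
				MatchProfile:  p.GetName(),
				MatchFunction: "backfill-first",
				Backfill:      b, // ties the new tickets to the existing session
				Tickets:       tickets[:1],
			})
			tickets = tickets[1:]
		}
		poolTickets[pool] = tickets
	}
	return matches, nil
}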

@ -18,10 +18,13 @@
// arguments used:
// mode: The game mode the player wants to play in. mode is a hard partition.
// regions: Players may have good latency to one or more regions. A player will
// search for matches in all eligible regions.
//
// search for matches in all eligible regions.
//
// skill: Players have a random skill based on a normal distribution. Players
// will only be matched with other players who have a close skill value. The
// match functions have overlapping partitions of the skill brackets.
//
// will only be matched with other players who have a close skill value. The
// match functions have overlapping partitions of the skill brackets.
package teamshooter
import (
@ -32,9 +35,8 @@ import (
"sort"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/wrapperspb"
"open-match.dev/open-match/pkg/pb"
)
@ -154,9 +156,13 @@ func (t *TeamShooterScenario) Ticket() *pb.Ticket {
}
}
func (t *TeamShooterScenario) Backfill() *pb.Backfill {
return nil
}
// MatchFunction puts tickets into matches based on their skill, finding the
// required number of tickets for a game within the maximum skill difference.
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolBackfills map[string][]*pb.Backfill, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
skill := func(t *pb.Ticket) float64 {
return t.SearchFields.DoubleArgs[skillArg]
}
@ -261,9 +267,9 @@ type matchExt struct {
}
func unpackMatch(m *pb.Match) (*matchExt, error) {
v := &wrappers.DoubleValue{}
v := &wrapperspb.DoubleValue{}
err := ptypes.UnmarshalAny(m.Extensions["quality"], v)
err := m.Extensions["quality"].UnmarshalTo(v)
if err != nil {
return nil, fmt.Errorf("Error unpacking match quality: %w", err)
}
@ -278,9 +284,9 @@ func unpackMatch(m *pb.Match) (*matchExt, error) {
}
func (m *matchExt) pack() (*pb.Match, error) {
v := &wrappers.DoubleValue{Value: m.quality}
v := &wrapperspb.DoubleValue{Value: m.quality}
a, err := ptypes.MarshalAny(v)
a, err := anypb.New(v)
if err != nil {
return nil, fmt.Errorf("Error packing match quality: %w", err)
}
@ -290,7 +296,7 @@ func (m *matchExt) pack() (*pb.Match, error) {
Tickets: m.tickets,
MatchProfile: m.matchProfile,
MatchFunction: m.matchFunction,
Extensions: map[string]*any.Any{
Extensions: map[string]*anypb.Any{
"quality": a,
},
}, nil
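The hunks above follow a mechanical migration from the deprecated github.com/golang/protobuf helpers (ptypes, any, wrappers) to google.golang.org/protobuf. A self-contained before/after sketch, with an illustrative value:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Before: v := &wrappers.DoubleValue{Value: 0.87}; a, err := ptypes.MarshalAny(v)
	a, err := anypb.New(wrapperspb.Double(0.87))
	if err != nil {
		panic(err)
	}

	// Before: err := ptypes.UnmarshalAny(a, v)
	v := &wrapperspb.DoubleValue{}
	if err := a.UnmarshalTo(v); err != nil {
		panic(err)
	}
	fmt.Println(v.GetValue()) // 0.87
}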

150
go.mod

@ -15,58 +15,116 @@ module open-match.dev/open-match
// limitations under the License.
// When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
go 1.14
go 1.21
require (
cloud.google.com/go v0.47.0 // indirect
contrib.go.opencensus.io/exporter/jaeger v0.1.0
contrib.go.opencensus.io/exporter/ocagent v0.6.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0
contrib.go.opencensus.io/exporter/stackdriver v0.12.8
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71
contrib.go.opencensus.io/exporter/ocagent v0.7.0
contrib.go.opencensus.io/exporter/prometheus v0.4.2
contrib.go.opencensus.io/exporter/stackdriver v0.13.14
github.com/Bose/minisentinel v0.0.0-20200130220412-917c5a9223bb
github.com/TV4/logrus-stackdriver-formatter v0.1.0
github.com/alicebob/miniredis/v2 v2.11.0
github.com/apache/thrift v0.13.0 // indirect
github.com/aws/aws-sdk-go v1.25.27 // indirect
github.com/alicebob/miniredis/v2 v2.30.5
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/fsnotify/fsnotify v1.4.7
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
github.com/golang/protobuf v1.3.2
github.com/fsnotify/fsnotify v1.6.0
github.com/go-redsync/redsync/v4 v4.8.1
github.com/golang/protobuf v1.5.3
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible
github.com/googleapis/gnostic v0.3.1 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
github.com/grpc-ecosystem/grpc-gateway v1.12.0
github.com/imdario/mergo v0.3.8 // indirect
github.com/json-iterator/go v1.1.8 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/pelletier/go-toml v1.6.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.2.1
github.com/rs/xid v1.2.1
github.com/sirupsen/logrus v1.4.2
github.com/spf13/afero v1.2.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.16.0
github.com/rs/xid v1.5.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/viper v1.16.0
github.com/stretchr/testify v1.8.4
go.opencensus.io v0.24.0
golang.org/x/net v0.25.0
golang.org/x/sync v0.3.0
google.golang.org/genproto v0.0.0-20230815205213-6bfd019c3878 // indirect
google.golang.org/grpc v1.57.1
google.golang.org/protobuf v1.34.1
k8s.io/api v0.28.0 // kubernetes-1.14.10
k8s.io/apimachinery v0.28.0
k8s.io/client-go v0.28.0
)
require (
google.golang.org/genproto/googleapis/api v0.0.0-20230815205213-6bfd019c3878
google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878
)
require (
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/monitoring v1.15.1 // indirect
cloud.google.com/go/trace v1.10.1 // indirect
github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect
github.com/aws/aws-sdk-go v1.44.324 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.5 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/prometheus/prometheus v0.46.0 // indirect
github.com/prometheus/statsd_exporter v0.24.0 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.5.0
github.com/stretchr/testify v1.4.0
go.opencensus.io v0.22.1
golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 // indirect
golang.org/x/net v0.0.0-20191105084925-a882066a44e0
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
google.golang.org/api v0.13.0 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6
google.golang.org/grpc v1.25.0
github.com/subosito/gotenv v1.6.0 // indirect
github.com/yuin/gopher-lua v1.1.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/api v0.137.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.2.5 // indirect
k8s.io/api v0.0.0-20191004102255-dacd7df5a50b // kubernetes-1.13.12
k8s.io/apimachinery v0.0.0-20191004074956-01f8b7d1121a // kubernetes-1.13.12
k8s.io/client-go v0.0.0-20191004102537-eb5b9a8cfde7 // kubernetes-1.13.12
k8s.io/klog v1.0.0 // indirect
sigs.k8s.io/yaml v1.1.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230811205723-7ac0aad8c58d // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

861
go.sum

File diff suppressed because it is too large

@ -5,7 +5,7 @@ Templates under the `templates/` directory are for the core components in Open M
Open Match also provides templates for optional components that are disabled by default under the `subcharts/` directory.
1. `open-match-customize` contains flexible templates to deploy your own matchfunction and evaluator.
2. `open-match-telemetry` contains monitoring support for Open Match; you may choose to enable/disable [jaeger](https://www.jaegertracing.io/ "jaeger"), [prometheus](http://prometheus.io "prometheus"), [stackdriver](https://cloud.google.com/stackdriver/ "stackdriver"), and [grafana](https://grafana.com/ "grafana") by overriding the config values in the provided templates.
2. `open-match-telemetry` contains monitoring support for Open Match; you may choose to enable/disable [prometheus](http://prometheus.io "prometheus"), [Observability in Google Cloud](https://cloud.google.com/stackdriver/docs "Observability in Google Cloud"), and [grafana](https://grafana.com/ "grafana") by overriding the config values in the provided templates.
You may control the behavior of Open Match by overriding the configs in `install/helm/open-match/values.yaml` file. Here are a few examples:
@ -26,7 +26,7 @@ swaggerui:
global:
kubernetes:
service:
- portType: ClusterIP
- portType: ClusterIP
+ portType: LoadBalancer
# Enables grafana support in Open Match

@ -13,13 +13,13 @@
# limitations under the License.
apiVersion: v2
appVersion: "0.0.0-dev"
version: 0.0.0-dev
appVersion: "1.8.1"
version: 1.8.1
name: open-match
dependencies:
- name: redis
version: 9.5.0
repository: https://kubernetes-charts.storage.googleapis.com/
version: 17.15.4
repository: https://charts.bitnami.com/bitnami
condition: open-match-core.redis.enabled
- name: open-match-telemetry
version: 0.0.0-dev

@ -54,7 +54,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.evaluator.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
{{- include "openmatch.HorizontalPodAutoscaler.evaluator.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
@ -82,6 +82,7 @@ spec:
component: evaluator
release: {{ .Release.Name }}
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.evaluatorConfigs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}

@ -54,7 +54,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatchcustomize.function.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
{{- include "openmatch.HorizontalPodAutoscaler.matchfunction.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment
@ -83,6 +83,7 @@ spec:
component: matchfunction
release: {{ .Release.Name }}
spec:
{{- include "openmatch.labels.nodegrouping" . | nindent 6 }}
volumes:
{{- include "openmatch.volumes.configs" (. | merge (dict "configs" .Values.mmfConfigs)) | nindent 8}}
{{- include "openmatch.volumes.tls" . | nindent 8}}

@ -18,13 +18,15 @@
"links": [],
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 16,
"id": 28,
"panels": [],
"title": "Iterations",
"type": "row"
},
@ -130,11 +132,317 @@
"x": 0,
"y": 9
},
"id": 16,
"panels": [],
"title": "Backfills",
"type": "row"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 10
},
"id": 30,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_backend_backfills_deleted[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "Backfilld Deleted per second",
"refId": "B"
},
{
"expr": "sum(rate(scale_backend_backfill_deletes_failed[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backfill Deletions Failed per second",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Backfill Deletion",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 18
},
"id": 14,
"panels": [],
"title": "Tickets",
"type": "row"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 19
},
"id": 26,
"legend": {
"alignAsTable": true,
"avg": false,
"current": true,
"max": false,
"min": false,
"rightSide": true,
"show": true,
"total": false,
"values": true
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "99%-ile",
"refId": "A"
},
{
"expr": "histogram_quantile(0.95, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "95%-ile",
"refId": "B"
},
{
"expr": "histogram_quantile(0.90, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "90%-ile",
"refId": "C"
},
{
"expr": "histogram_quantile(0.50, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "50%-ile",
"refId": "D"
},
{
"expr": "histogram_quantile(0.10, sum(rate(scale_frontend_tickets_time_to_assignment_bucket[5m])) by (le))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "10%-ile",
"refId": "E"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Ticket Time to Assignment",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "ms",
"label": null,
"logBase": 2,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 19
},
"id": 12,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_backend_sum_tickets_returned[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backend Tickets in Matches pers second",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Tickets In Matches",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
@ -146,7 +454,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 10
"y": 27
},
"id": 2,
"legend": {
@ -242,12 +550,12 @@
"dashes": false,
"fill": 1,
"gridPos": {
"h": 9,
"h": 8,
"w": 12,
"x": 12,
"y": 10
"y": 28
},
"id": 12,
"id": 22,
"legend": {
"avg": false,
"current": false,
@ -272,18 +580,26 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_backend_sum_tickets_returned[5m]))",
"expr": "sum(rate(scale_frontend_tickets_deleted[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "Backend Tickets Deleted per second",
"refId": "B"
},
{
"expr": "sum(rate(scale_frontend_ticket_deletes_failed[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backend Tickets in Matches pers second",
"refId": "A"
"legendFormat": "Backend Ticket Deletions Failed per second",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Tickets In Matches",
"title": "Ticket Deletion",
"tooltip": {
"shared": true,
"sort": 0,
@ -331,7 +647,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 19
"y": 36
},
"id": 24,
"legend": {
@ -414,106 +730,13 @@
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 19
},
"id": 22,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(scale_backend_tickets_deleted[5m]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "Backend Tickets Deleted per second",
"refId": "B"
},
{
"expr": "sum(rate(scale_backend_ticket_deletes_failed[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Backend Ticket Deletions Failed per second",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Ticket Deletion",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 27
"y": 44
},
"id": 18,
"panels": [],
@ -530,7 +753,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 28
"y": 45
},
"id": 6,
"legend": {
@ -616,7 +839,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 28
"y": 45
},
"id": 19,
"legend": {
@ -705,7 +928,7 @@
"h": 1,
"w": 24,
"x": 0,
"y": 36
"y": 53
},
"id": 21,
"panels": [],
@ -722,7 +945,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 37
"y": 54
},
"id": 8,
"legend": {
@ -807,7 +1030,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 37
"y": 54
},
"id": 10,
"legend": {
@ -890,7 +1113,7 @@
}
}
],
"refresh": "",
"refresh": "10s",
"schemaVersion": 18,
"style": "dark",
"tags": [],

@ -31,7 +31,7 @@ spec:
protocol: TCP
port: {{ .Values.scaleFrontend.httpPort }}
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "openmatchscale.scaleFrontend.hostName" . }}

@ -19,15 +19,10 @@ name: open-match-telemetry
version: 0.0.0-dev
dependencies:
- name: prometheus
version: 9.2.0
repository: https://kubernetes-charts.storage.googleapis.com/
version: 15.8.7
repository: https://prometheus-community.github.io/helm-charts
condition: global.telemetry.prometheus.enabled,prometheus.enabled
- name: grafana
version: 4.0.1
repository: https://kubernetes-charts.storage.googleapis.com/
version: 6.29.3
repository: https://grafana.github.io/helm-charts
condition: global.telemetry.grafana.enabled,grafana.enabled
- name: jaeger
version: 0.13.3
repository: https://kubernetes-charts-incubator.storage.googleapis.com/
condition: global.telemetry.jaeger.enabled,jaeger.enabled

@ -13,12 +13,20 @@
# limitations under the License.
{{- if .Values.global.telemetry.grafana.enabled }}
{{- $om := (include "openmatch.fullname" .) }}
{{ $c := 1 | int }}
{{- range $path, $bytes := .Files.Glob "dashboards/*.json" }}
{{- $ext := ext $path }}
{{- $file := base $path }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "openmatch.fullname" . }}-dashboards
name: {{ printf "%s-%s-dashboard" $om ((printf "%s" $file) | replace $ext "") }}
labels:
grafana_dashboard: "1"
grafana_dashboard: {{ printf "d-%d" $c }}
data:
{{- (.Files.Glob "dashboards/*.json").AsConfig | nindent 2 }}
{{- ($.Files.Glob (printf "dashboards/%s" $file)).AsConfig | nindent 2 }}
---
{{ $c = add1 $c }}
{{- end }}
{{- end }}

@ -146,23 +146,3 @@ grafana:
datasources:
enabled: true
plugins: grafana-piechart-panel
jaeger:
enabled: true
# Configs for the cassandra schema job
schema:
pullPolicy: Always
mode: prod
activeDeadlineSeconds: 200
cassandra:
image:
tag: latest
config:
cluster_size: 2
resources:
requests:
memory: 4Gi
cpu: 2
limits:
memory: 4Gi
cpu: 2

@ -102,17 +102,17 @@ resources:
{{- end -}}
{{- define "openmatch.volumemounts.withredis" -}}
{{- if .Values.redis.usePassword }}
{{- if .Values.redis.auth.enabled }}
- name: redis-password
mountPath: {{ .Values.redis.secretMountPath }}
{{- end -}}
{{- end -}}
{{- define "openmatch.volumes.withredis" -}}
{{- if .Values.redis.usePassword }}
{{- if .Values.redis.auth.enabled }}
- name: redis-password
secret:
secretName: {{ include "call-nested" (list . "redis" "redis.fullname") }}
secretName: {{ include "call-nested" (list . "redis" "common.names.fullname") }}
{{- end -}}
{{- end -}}
@ -150,16 +150,46 @@ readinessProbe:
failureThreshold: 2
{{- end -}}
{{- define "openmatch.HorizontalPodAutoscaler.spec.common" -}}
minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.targetCPUUtilizationPercentage }}
{{- define "openmatch.HorizontalPodAutoscaler.frontend.spec.common" -}}
minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.frontend.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.frontend.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.frontend.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.HorizontalPodAutoscaler.backend.spec.common" -}}
minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.backend.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.backend.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.backend.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.HorizontalPodAutoscaler.query.spec.common" -}}
minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.query.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.query.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.query.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.HorizontalPodAutoscaler.evaluator.spec.common" -}}
minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.evaluator.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.evaluator.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.evaluator.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.HorizontalPodAutoscaler.matchfunction.spec.common" -}}
minReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.matchfunction.minReplicas }}
maxReplicas: {{ .Values.global.kubernetes.horizontalPodAutoScaler.matchfunction.maxReplicas }}
targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoScaler.matchfunction.targetCPUUtilizationPercentage }}
{{- end -}}
{{- define "openmatch.serviceAccount.name" -}}
{{- .Values.global.kubernetes.serviceAccount | default (printf "%s-unprivileged-service" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
{{- define "openmatch.serviceAccountAnnotations" -}}
{{- if .Values.global.kubernetes.serviceAccountAnnotations -}}
{{- toYaml (.Values.global.kubernetes.serviceAccountAnnotations) | nindent 4 -}}
{{- end -}}
{{- end -}}
{{- define "openmatch.swaggerui.hostName" -}}
{{- .Values.swaggerui.hostName | default (printf "%s-swaggerui" (include "openmatch.fullname" . ) ) -}}
{{- end -}}
@ -192,25 +222,9 @@ targetCPUUtilizationPercentage: {{ .Values.global.kubernetes.horizontalPodAutoSc
{{- printf "%s-configmap-override" (include "openmatch.fullname" . ) -}}
{{- end -}}
{{- define "openmatch.jaeger.agent" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.agent.name") }}:6831
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "openmatch.jaeger.collector" -}}
{{- if index .Values "open-match-telemetry" "enabled" -}}
{{- if index .Values "open-match-telemetry" "jaeger" "enabled" -}}
http://{{ include "call-nested" (list . "open-match-telemetry.jaeger" "jaeger.collector.name") }}:14268/api/traces
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Call templates from sub-charts in a synthesized context, workaround for https://github.com/helm/helm/issues/3920
Mainly useful for things like `{{ include "call-nested" (list . "redis" "redis.fullname") }}`
Mainly useful for things like `{{ include "call-nested" (list . "redis" "common.names.fullname") }}`
https://github.com/helm/helm/issues/4535#issuecomment-416022809
https://github.com/helm/helm/issues/4535#issuecomment-477778391
*/}}
@ -223,4 +237,4 @@ https://github.com/helm/helm/issues/4535#issuecomment-477778391
{{- $values = index $values . }}
{{- end }}
{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }}
{{- end }}
{{- end }}

@ -51,7 +51,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.backend.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
{{- include "openmatch.HorizontalPodAutoscaler.backend.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment

@ -51,7 +51,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.frontend.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
{{- include "openmatch.HorizontalPodAutoscaler.frontend.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment

@ -26,7 +26,11 @@ metadata:
data:
matchmaker_config_default.yaml: |-
logging:
{{- if .Values.global.logging.level }}
level: {{ .Values.global.logging.level }}
{{- else }}
level: debug
{{- end }}
{{- if .Values.global.telemetry.stackdriverMetrics.enabled }}
format: stackdriver
{{- else }}
@ -90,13 +94,25 @@ data:
{{- if index .Values "redis" "sentinel" "enabled"}}
sentinelPort: {{ .Values.redis.sentinel.port }}
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
sentinelHostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}
sentinelHostname: {{ include "call-nested" (list . "redis" "common.names.fullname") }}
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
{{- else}}
# Open Match's default Redis setups
hostname: {{ include "call-nested" (list . "redis" "redis.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
hostname: {{ include "call-nested" (list . "redis" "common.names.fullname") }}-master.{{ .Release.Namespace }}.svc.cluster.local
{{- if .Values.redis.redisPort }}
# source value: redis.redisPort = {{ .Values.redis.redisPort }}
port: {{ .Values.redis.redisPort }}
{{- else if index .Values "open-match-core" "redis" "port" }}
# source value: open-match-core.redis.port = {{ index .Values "open-match-core" "redis" "port"}}
port: {{ index .Values "open-match-core" "redis" "port" }}
{{- end }}
{{- if .Values.redis.user }}
# source value: redis.user = {{ .Values.redis.user }}
user: {{ .Values.redis.user }}
{{- else if index .Values "open-match-core" "redis" "user" }}
# source value: open-match-core.redis.user = {{ index .Values "open-match-core" "redis" "user"}}
user: {{ index .Values "open-match-core" "redis" "user"}}
{{- end }}
{{- end}}
{{- else }}
# BYO Redis setups
@ -104,7 +120,7 @@ data:
port: {{ index .Values "open-match-core" "redis" "port" }}
user: {{ index .Values "open-match-core" "redis" "user" }}
{{- end }}
usePassword: {{ .Values.redis.usePassword }}
usePassword: {{ .Values.redis.auth.enabled }}
passwordPath: {{ .Values.redis.secretMountPath }}/redis-password
pool:
maxIdle: {{ index .Values "open-match-core" "redis" "pool" "maxIdle" }}
@ -117,10 +133,6 @@ data:
traceSamplingFraction: "{{ .Values.global.telemetry.traceSamplingFraction }}"
zpages:
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
jaeger:
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
agentEndpoint: "{{ tpl .Values.global.telemetry.jaeger.agentEndpoint . }}"
collectorEndpoint: "{{ tpl .Values.global.telemetry.jaeger.collectorEndpoint . }}"
prometheus:
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if index .Values "open-match-override" }}
{{- if index .Values "open-match-override" "enabled" }}
apiVersion: v1
kind: ConfigMap
metadata:
@ -40,6 +40,7 @@ data:
assignedDeleteTimeout: {{ index .Values "open-match-core" "assignedDeleteTimeout" }}
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: {{ index .Values "open-match-core" "queryPageSize" }}
backfillLockTimeout: {{ index .Values "open-match-core" "backfillLockTimeout" }}
api:
evaluator:
hostname: "{{ include "openmatch.evaluator.hostName" . }}"

@ -1,140 +0,0 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if index .Values "open-match-core" "enabled" }}
{{- if empty .Values.ci }}
# This is the least restricted PSP used to create privileged pods to disable THP in host kernel.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
spec:
privileged: true
allowPrivilegeEscalation: true
allowedCapabilities:
- '*'
volumes:
- '*'
hostNetwork: true
hostPorts:
# Redis
- min: 6379
max: 6379
- min: 9121
max: 9121
hostIPC: true
hostPID: true
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
---
# This does not allow creating privileged pods and restricts bound pods to the specified port ranges.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
spec:
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
forbiddenSysctls:
- "*"
fsGroup:
rule: "MustRunAs"
ranges:
- min: 1
max: 65535
hostIPC: false
hostNetwork: false
hostPID: false
hostPorts:
# Open Match Services
- min: 50500
max: 50510
- min: 51500
max: 51510
# Cassandra
- min: 7000
max: 7001
- min: 7199
max: 7199
- min: 9042
max: 9042
- min: 9160
max: 9160
# Grafana
- min: 3000
max: 3000
# Jaeger
- min: 5775
max: 5778
- min: 6831
max: 6832
- min: 14267
max: 14268
- min: 9411
max: 9411
- min: 16686
max: 16686
# Prometheus
- min: 9090
max: 9093
- min: 9100
max: 9100
- min: 8080
max: 8080
privileged: false # Prevents creation of privileged Pods
readOnlyRootFilesystem: false
# Capabilities Reference: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
# TODO: Open Match itself doesn't need any capabilities but its dependencies do. We should split out the service accounts later.
allowedCapabilities:
- CHOWN
#requiredDropCapabilities:
#- ALL
runAsUser:
rule: "RunAsAny"
# Blocked on isolating the open match services from dependencies (Redis, Prometheus, etc.)
# Require the container to run without root privileges.
#rule: 'MustRunAsNonRoot'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: RunAsAny
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
# Assume that persistentVolumes set up by the cluster admin are safe to use.
- 'persistentVolumeClaim'
{{- end }}
{{- end }}

@ -51,7 +51,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
name: {{ include "openmatch.query.hostName" . }}
{{- include "openmatch.HorizontalPodAutoscaler.spec.common" . | nindent 2 }}
{{- include "openmatch.HorizontalPodAutoscaler.query.spec.common" . | nindent 2 }}
---
apiVersion: apps/v1
kind: Deployment

@ -23,6 +23,8 @@ metadata:
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
pod-security.kubernetes.io/enforce: baseline
pod-security.kubernetes.io/warn: baseline
{{- end }}
---
# Create a universal service account for open-match-core services.
@ -31,83 +33,12 @@ kind: ServiceAccount
metadata:
name: {{ include "openmatch.serviceAccount.name" . }}
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
{{- include "openmatch.serviceAccountAnnotations" . }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
automountServiceAccountToken: true
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-service-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
verbs:
- use
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-service-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
subjects:
- kind: Group
name: system:authenticated # All authenticated users
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-service-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-redis-role
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-redis-podsecuritypolicy
verbs:
- use
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "openmatch.fullname" . }}-redis-role-binding
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ include "call-nested" (list . "redis" "redis.serviceAccountName") }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "openmatch.fullname" . }}-redis-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

@ -24,14 +24,6 @@ metadata:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
rules:
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- {{ include "openmatch.fullname" . }}-core-podsecuritypolicy
verbs:
- use
# Grant this role get & list permission for k8s endpoints and pods resources
# Required for e2e in-cluster testing.
- apiGroups:

@ -91,7 +91,7 @@ spec:
command: ["go"]
args:
- "test"
- "./internal/testing/e2e"
- "./testing/e2e"
- "-v"
- "-timeout"
- "150s"

@ -106,17 +106,12 @@ configs:
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
redis:
redisPort: 6379
usePassword: false
usePasswordFile: false
secretMountPath: /opt/bitnami/redis/secrets
architecture: standalone
auth:
enabled: false
configmap: |
maxclients 100000
maxmemory 500000000
sentinel:
enabled: true
masterSet: om-redis-master
port: 26379
master:
disableCommands: [] # don't disable 'FLUSH-' commands
resources:
@ -130,20 +125,6 @@ redis:
enabled: false
metrics:
enabled: true
cluster:
slaveCount: 3
serviceAccount:
create: true
slave:
persistence:
enabled: false
resources:
requests:
memory: 1Gi
cpu: 1
limits:
memory: 2Gi
cpu: 2
sysctlImage:
# Enable this setting in production if you are running Open Match under Linux environment
enabled: true
@ -188,6 +169,8 @@ open-match-core:
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
# Duration for redis locks to expire.
backfillLockTimeout: 1m
redis:
enabled: true
@ -226,6 +209,10 @@ open-match-customize:
# function:
# image: [YOUR_MMF_IMAGE]
# Controls if users need to install open-match-override ConfigMap.
open-match-override:
enabled: false
# Global configurations that are visible to all subcharts
global:
kubernetes:
@ -236,9 +223,26 @@ global:
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
horizontalPodAutoScaler:
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 60
frontend:
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 60
backend:
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 60
query:
minReplicas: 5
maxReplicas: 10
targetCPUUtilizationPercentage: 60
evaluator:
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 60
matchfunction:
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 60
resources:
requests:
memory: 1.5Gi
@ -248,6 +252,8 @@ global:
cpu: 2
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Adds custom annotations to the Open Match service account.
serviceAccountAnnotations: {}
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -280,10 +286,6 @@ global:
traceSamplingFraction: 0.005 # What fraction of traces to sample.
zpages:
enabled: true
jaeger:
enabled: false
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
prometheus:
enabled: false
endpoint: "/metrics"

@ -103,39 +103,38 @@ configs:
configName: '{{ include "openmatch.configmap.override" . }}'
# Override Redis settings
# https://hub.helm.sh/charts/stable/redis
# https://github.com/helm/charts/tree/master/stable/redis
# https://github.com/bitnami/charts/tree/ba40e46ec6831e039f5bf213ab10e9748603ce6c/bitnami/redis
redis:
redisPort: 6379
usePassword: false
usePasswordFile: false
secretMountPath: /opt/bitnami/redis/secrets
configmap: |
architecture: standalone
auth:
enabled: false
commonConfiguration: |
maxclients 100000
maxmemory 300000000
sentinel:
enabled: true
masterSet: om-redis-master
port: 26379
master:
disableCommands: [] # don't disable 'FLUSH-' commands
persistence:
enabled: false
resources:
requests:
memory: 300Mi
cpu: 0.5
replica:
disableCommands: [] # don't disable 'FLUSH-' commands
replicaCount: 3
resources:
requests:
memory: 300Mi
cpu: 0.5
metrics:
enabled: true
cluster:
slaveCount: 2
serviceAccount:
create: true
sysctlImage:
# Enable this setting in production if you are running Open Match under Linux environment
enabled: false
# Disable this setting in production if you are not running Redis in Linux
enabled: true
mountHostSys: true
# Redis may require some changes in the kernel of the host machine to work as expected,
# in particular increasing the somaxconn value and disabling transparent huge pages.
# https://github.com/helm/charts/tree/master/stable/redis#host-kernel-settings
# https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/configure-kernel-settings/
command:
- /bin/sh
- -c
@ -173,6 +172,8 @@ open-match-core:
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
# Duration for redis locks to expire.
backfillLockTimeout: 1m
redis:
enabled: true
@ -211,6 +212,10 @@ open-match-customize:
# function:
# image: [YOUR_MMF_IMAGE]
# Controls if users need to install open-match-override ConfigMap.
open-match-override:
enabled: false
# Global configurations that are visible to all subcharts
global:
kubernetes:
@ -221,9 +226,26 @@ global:
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
horizontalPodAutoScaler:
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
frontend:
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
backend:
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
query:
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 80
evaluator:
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
matchfunction:
minReplicas: 1
maxReplicas: 10
targetCPUUtilizationPercentage: 80
resources:
requests:
memory: 100Mi
@ -233,6 +255,8 @@ global:
cpu: 100m
# Overrides the name of the service account which provides an identity for processes that run in a Pod in Open Match.
serviceAccount:
# Adds custom annotations to the Open Match service account.
serviceAccountAnnotations: {}
# Use this field if you need to override the port type for all services defined in this chart
service:
portType:
@ -254,7 +278,7 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 0.0.0-dev
tag: 1.8.1
pullPolicy: Always
# Expose the telemetry configurations to all subcharts because prometheus, for example,
@ -265,10 +289,6 @@ global:
traceSamplingFraction: 0.01 # What fraction of traces to sample.
zpages:
enabled: true
jaeger:
enabled: false
agentEndpoint: '{{ include "openmatch.jaeger.agent" . }}'
collectorEndpoint: '{{ include "openmatch.jaeger.collector" . }}'
prometheus:
enabled: false
endpoint: "/metrics"

@ -27,7 +27,6 @@ admissionWhitelistPatterns:
- namePattern: docker.io/bitnami/redis:*
- namePattern: docker.io/oliver006/redis_exporter:*
- namePattern: registry.hub.docker.com/grafana/*
- namePattern: registry.hub.docker.com/jaegertracing/*
- namePattern: registry.hub.docker.com/jimmidyson/configmap-reload:*
- namePattern: registry.hub.docker.com/kiwigrid/k8s-sidecar:*
- namePattern: registry.hub.docker.com/prom/*

@ -96,11 +96,6 @@ resource "google_container_cluster" "ci_cluster" {
identity_namespace = "${var.gcp_project_id}.svc.id.goog"
}
# Enable PodSecurityPolicy
pod_security_policy_config {
enabled = "true"
}
node_config {
oauth_scopes = [
"https://www.googleapis.com/auth/devstorage.read_only",

@ -0,0 +1,26 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package openmatch.internal;
option go_package = "open-match.dev/open-match/internal/ipb";
import "api/messages.proto";
message BackfillInternal {
// Represents a backfill entity which is used to fill partially full matches
openmatch.Backfill backfill = 1;
// List of ticket IDs associated with a current backfill
repeated string ticket_ids = 2;
}
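A hedged sketch of how this wrapper is serialized when a backfill is persisted: the backfill and its associated ticket IDs travel as one record. ipb is the Go package generated from this proto; the function below is illustrative, not code from the repo.

package statestore

import (
	"google.golang.org/protobuf/proto"

	"open-match.dev/open-match/internal/ipb"
	"open-match.dev/open-match/pkg/pb"
)

// marshalBackfillRecord shows the shape of the stored value: one
// BackfillInternal message combining the backfill with its ticket IDs.
func marshalBackfillRecord(b *pb.Backfill, ticketIDs []string) ([]byte, error) {
	return proto.Marshal(&ipb.BackfillInternal{
		Backfill:  b,
		TicketIds: ticketIDs,
	})
}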

@ -27,14 +27,15 @@ import (
"go.opencensus.io/stats"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/pkg/errors"
"github.com/rs/xid"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
"open-match.dev/open-match/internal/appmain/contextcause"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/rpc"
@ -55,6 +56,7 @@ var (
"app": "openmatch",
"component": "app.backend",
})
errBackfillGenerationMismatch = errors.New("backfill generation mismatch")
)
// FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile
@ -89,7 +91,7 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
return synchronizeSend(ctx, syncStream, m, proposals)
})
eg.Go(func() error {
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs)
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs, s.store)
})
var mmfErr error
@ -142,7 +144,7 @@ sendProposals:
return nil
}
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc) error {
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc, store statestore.Service) error {
var startMmfsOnce sync.Once
for {
@ -169,6 +171,31 @@ func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync
if !ok {
return fmt.Errorf("error casting sync map value into *pb.Match: %w", err)
}
backfill := match.GetBackfill()
if backfill != nil {
ticketIds := make([]string, 0, len(match.Tickets))
for _, t := range match.Tickets {
ticketIds = append(ticketIds, t.Id)
}
err = createOrUpdateBackfill(ctx, backfill, ticketIds, store)
if err != nil {
e, ok := status.FromError(err)
if err == errBackfillGenerationMismatch || (ok && e.Code() == codes.NotFound) {
err = doReleaseTickets(ctx, ticketIds, store)
if err != nil {
logger.WithError(err).Errorf("failed to remove match tickets from pending release: %v", ticketIds)
}
continue
}
return errors.Wrapf(err, "failed to handle match backfill: %s", match.MatchId)
}
}
stats.Record(ctx, totalBytesPerMatch.M(int64(proto.Size(match))))
stats.Record(ctx, ticketsPerMatch.M(int64(len(match.GetTickets()))))
err = stream.Send(&pb.FetchMatchesResponse{Match: match})
@ -252,6 +279,8 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
if err != nil {
return status.Errorf(codes.FailedPrecondition, "failed to create mmf http request for profile %s: %s", profile.GetName(), err.Error())
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Transfer-Encoding", "chunked")
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
@ -296,16 +325,25 @@ func callHTTPMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
}
func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTicketsRequest) (*pb.ReleaseTicketsResponse, error) {
err := s.store.DeleteTicketsFromPendingRelease(ctx, req.GetTicketIds())
err := doReleaseTickets(ctx, req.GetTicketIds(), s.store)
if err != nil {
err = errors.Wrap(err, "failed to remove the awaiting tickets from the pending release for requested tickets")
return nil, err
}
stats.Record(ctx, ticketsReleased.M(int64(len(req.TicketIds))))
return &pb.ReleaseTicketsResponse{}, nil
}
func doReleaseTickets(ctx context.Context, ticketIds []string, store statestore.Service) error {
err := store.DeleteTicketsFromPendingRelease(ctx, ticketIds)
if err != nil {
err = errors.Wrap(err, "failed to remove the awaiting tickets from the pending release for requested tickets")
return err
}
stats.Record(ctx, ticketsReleased.M(int64(len(ticketIds))))
return nil
}
func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseAllTicketsRequest) (*pb.ReleaseAllTicketsResponse, error) {
err := s.store.ReleaseAllTickets(ctx)
if err != nil {
@ -330,6 +368,56 @@ func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicket
return resp, nil
}
func createOrUpdateBackfill(ctx context.Context, backfill *pb.Backfill, ticketIds []string, store statestore.Service) error {
if backfill.Id == "" {
backfill.Id = xid.New().String()
backfill.CreateTime = timestamppb.Now()
backfill.Generation = 1
err := store.CreateBackfill(ctx, backfill, ticketIds)
if err != nil {
return err
}
return store.IndexBackfill(ctx, backfill)
}
m := store.NewMutex(backfill.Id)
err := m.Lock(ctx)
if err != nil {
return err
}
defer func() {
_, unlockErr := m.Unlock(context.Background())
if unlockErr != nil {
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).WithError(unlockErr).Error("failed to make unlock")
}
}()
b, ids, err := store.GetBackfill(ctx, backfill.Id)
if err != nil {
return err
}
if b.Generation != backfill.Generation {
logger.WithFields(logrus.Fields{"backfill_id": backfill.Id}).
WithError(errBackfillGenerationMismatch).
Errorf("failed to update backfill, expecting: %d generation but got: %d", b.Generation, backfill.Generation)
return errBackfillGenerationMismatch
}
b.SearchFields = backfill.SearchFields
b.Extensions = backfill.Extensions
b.Generation++
err = store.UpdateBackfill(ctx, b, append(ids, ticketIds...))
if err != nil {
return err
}
return store.IndexBackfill(ctx, b)
}
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
resp, tickets, err := store.UpdateAssignments(ctx, req)
if err != nil {
@ -373,10 +461,7 @@ func recordTimeToAssignment(ctx context.Context, ticket *pb.Ticket) error {
}
now := time.Now()
created, err := ptypes.Timestamp(ticket.CreateTime)
if err != nil {
return err
}
created := ticket.CreateTime.AsTime()
stats.Record(ctx, ticketsTimeToAssignment.M(now.Sub(created).Milliseconds()))
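The same ptypes cleanup applies to timestamps: the fallible two-step conversion becomes a direct AsTime call. A standalone sketch, with an illustrative duration:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	createTime := timestamppb.New(time.Now().Add(-250 * time.Millisecond))

	// Before: created, err := ptypes.Timestamp(createTime) returned an error.
	// After: AsTime never fails; invalid inputs yield a sentinel time instead.
	created := createTime.AsTime()
	fmt.Println(time.Since(created).Milliseconds())
}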

@ -22,7 +22,6 @@ import (
"go.opencensus.io/stats"
"github.com/golang/protobuf/ptypes"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats/view"
"open-match.dev/open-match/internal/app/evaluator"
@ -63,7 +62,7 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
// then returns matches which don't collide with previously returned matches.
func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
matches := make([]*matchInp, 0)
nilEvlautionInputs := 0
nilEvaluationInputs := 0
for m := range in {
// Evaluation criteria is optional, but sort it lower than any matches which
@ -73,7 +72,7 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
}
if a, ok := m.Extensions["evaluation_input"]; ok {
err := ptypes.UnmarshalAny(a, inp)
err := a.UnmarshalTo(inp)
if err != nil {
logger.WithFields(logrus.Fields{
"match_id": m.MatchId,
@ -82,7 +81,7 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
continue
}
} else {
nilEvlautionInputs++
nilEvaluationInputs++
}
matches = append(matches, &matchInp{
match: m,
@ -90,16 +89,17 @@ func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
})
}
if nilEvlautionInputs > 0 {
if nilEvaluationInputs > 0 {
logger.WithFields(logrus.Fields{
"count": nilEvlautionInputs,
"count": nilEvaluationInputs,
}).Info("Some matches don't have the optional field evaluation_input set.")
}
sort.Sort(byScore(matches))
d := decollider{
ticketsUsed: make(map[string]*collidingMatch),
ticketsUsed: make(map[string]*collidingMatch),
backfillsUsed: make(map[string]*collidingMatch),
}
for _, m := range matches {
@ -121,11 +121,25 @@ type collidingMatch struct {
}
type decollider struct {
resultIDs []string
ticketsUsed map[string]*collidingMatch
resultIDs []string
ticketsUsed map[string]*collidingMatch
backfillsUsed map[string]*collidingMatch
}
func (d *decollider) maybeAdd(m *matchInp) {
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
if cm, ok := d.backfillsUsed[m.match.Backfill.Id]; ok {
logger.WithFields(logrus.Fields{
"match_id": m.match.GetMatchId(),
"backfill_id": m.match.Backfill.Id,
"match_score": m.inp.GetScore(),
"colliding_match_id": cm.id,
"colliding_match_score": cm.score,
}).Info("Higher quality match with colliding backfill found. Rejecting match.")
return
}
}
for _, t := range m.match.GetTickets() {
if cm, ok := d.ticketsUsed[t.Id]; ok {
logger.WithFields(logrus.Fields{
@ -139,6 +153,13 @@ func (d *decollider) maybeAdd(m *matchInp) {
}
}
if m.match.Backfill != nil && m.match.Backfill.Id != "" {
d.backfillsUsed[m.match.Backfill.Id] = &collidingMatch{
id: m.match.GetMatchId(),
score: m.inp.GetScore(),
}
}
for _, t := range m.match.GetTickets() {
d.ticketsUsed[t.Id] = &collidingMatch{
id: m.match.GetMatchId(),

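The decollider now rejects on either key space: a match loses if any of its tickets, or its backfill, was already claimed by a higher-scoring match. A self-contained sketch of that first-come-wins keyed dedup, independent of the Open Match types:

package main

import (
	"fmt"
	"sort"
)

type match struct {
	id    string
	score float64
	keys  []string // ticket and backfill IDs the match claims
}

// dedupe keeps the highest-scoring match for every contested key.
func dedupe(ms []match) []string {
	sort.Slice(ms, func(i, j int) bool { return ms[i].score > ms[j].score })
	used := map[string]bool{}
	kept := []string{}
outer:
	for _, m := range ms {
		for _, k := range m.keys {
			if used[k] {
				continue outer // a higher-scoring match already claimed this key
			}
		}
		for _, k := range m.keys {
			used[k] = true
		}
		kept = append(kept, m.id)
	}
	return kept
}

func main() {
	fmt.Println(dedupe([]match{
		{id: "a", score: 10, keys: []string{"t1", "t2", "bf1"}},
		{id: "b", score: 5, keys: []string{"t1", "t2", "bf2"}}, // loses t1/t2 to "a"
		{id: "c", score: 50, keys: []string{"t3"}},
	})) // prints [c a]
}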
@ -18,15 +18,14 @@ import (
"context"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"open-match.dev/open-match/pkg/pb"
)
func mustAny(m proto.Message) *any.Any {
result, err := ptypes.MarshalAny(m)
func mustAny(m proto.Message) *anypb.Any {
result, err := anypb.New(m)
if err != nil {
panic(err)
}
@ -37,11 +36,14 @@ func TestEvaluate(t *testing.T) {
ticket1 := &pb.Ticket{Id: "1"}
ticket2 := &pb.Ticket{Id: "2"}
ticket3 := &pb.Ticket{Id: "3"}
backfill0 := &pb.Backfill{}
backfill1 := &pb.Backfill{Id: "1"}
backfill2 := &pb.Backfill{Id: "2"}
ticket12Score1 := &pb.Match{
MatchId: "ticket12Score1",
Tickets: []*pb.Ticket{ticket1, ticket2},
Extensions: map[string]*any.Any{
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
@ -51,7 +53,7 @@ func TestEvaluate(t *testing.T) {
ticket12Score10 := &pb.Match{
MatchId: "ticket12Score10",
Tickets: []*pb.Ticket{ticket2, ticket1},
Extensions: map[string]*any.Any{
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 10,
}),
@ -61,7 +63,7 @@ func TestEvaluate(t *testing.T) {
ticket123Score5 := &pb.Match{
MatchId: "ticket123Score5",
Tickets: []*pb.Ticket{ticket1, ticket2, ticket3},
Extensions: map[string]*any.Any{
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 5,
}),
@ -71,13 +73,68 @@ func TestEvaluate(t *testing.T) {
ticket3Score50 := &pb.Match{
MatchId: "ticket3Score50",
Tickets: []*pb.Ticket{ticket3},
Extensions: map[string]*any.Any{
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 50,
}),
},
}
ticket1Backfill0Score1 := &pb.Match{
MatchId: "ticket1Backfill0Score1",
Tickets: []*pb.Ticket{ticket1},
Backfill: backfill0,
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket2Backfill0Score1 := &pb.Match{
MatchId: "ticket2Backfill0Score1",
Tickets: []*pb.Ticket{ticket2},
Backfill: backfill0,
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket12Backfill1Score1 := &pb.Match{
MatchId: "ticket12Bacfill1Score1",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill1,
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 1,
}),
},
}
ticket12Backfill1Score10 := &pb.Match{
MatchId: "ticket12Bacfill1Score1",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill1,
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 10,
}),
},
}
ticket12Backfill2Score5 := &pb.Match{
MatchId: "ticket12Backfill2Score5",
Tickets: []*pb.Ticket{ticket1, ticket2},
Backfill: backfill2,
Extensions: map[string]*anypb.Any{
"evaluation_input": mustAny(&pb.DefaultEvaluationCriteria{
Score: 5,
}),
},
}
tests := []struct {
description string
testMatches []*pb.Match
@ -108,6 +165,16 @@ func TestEvaluate(t *testing.T) {
testMatches: []*pb.Match{ticket12Score1, ticket12Score10, ticket123Score5, ticket3Score50},
wantMatchIDs: []string{ticket12Score10.GetMatchId(), ticket3Score50.GetMatchId()},
},
{
description: "test evaluator ignores backfills with empty id",
testMatches: []*pb.Match{ticket1Backfill0Score1, ticket2Backfill0Score1},
wantMatchIDs: []string{ticket1Backfill0Score1.GetMatchId(), ticket2Backfill0Score1.GetMatchId()},
},
{
description: "test deduplicates matches by backfill and tickets and returns match with higher score",
testMatches: []*pb.Match{ticket12Backfill1Score1, ticket12Backfill1Score10, ticket12Backfill2Score5},
wantMatchIDs: []string{ticket12Backfill1Score10.GetMatchId()},
},
}
for _, test := range tests {

@ -46,7 +46,7 @@ var (
func BindServiceFor(eval Evaluator) appmain.Bind {
return func(p *appmain.Params, b *appmain.Bindings) error {
b.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterEvaluatorServer(s, &evaluatorService{eval})
pb.RegisterEvaluatorServer(s, &evaluatorService{evaluate: eval})
}, pb.RegisterEvaluatorHandlerFromEndpoint)
b.RegisterViews(
matchesPerEvaluateRequestView,

@ -25,8 +25,10 @@ import (
)
var (
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerBackfill = stats.Int64("open-match.dev/frontend/total_bytes_per_backfill", "Total bytes per backfill", stats.UnitBytes)
searchFieldsPerBackfill = stats.Int64("open-match.dev/frontend/searchfields_per_backfill", "Searchfields per backfill", stats.UnitDimensionless)
totalBytesPerTicketView = &view.View{
Measure: totalBytesPerTicket,
@ -40,6 +42,18 @@ var (
Description: "SearchFields per ticket",
Aggregation: telemetry.DefaultCountDistribution,
}
totalBytesPerBackfillView = &view.View{
Measure: totalBytesPerBackfill,
Name: "open-match.dev/frontend/total_bytes_per_backfill",
Description: "Total bytes per backfill",
Aggregation: telemetry.DefaultBytesDistribution,
}
searchFieldsPerBackfillView = &view.View{
Measure: searchFieldsPerBackfill,
Name: "open-match.dev/frontend/searchfields_per_backfill",
Description: "SearchFields per backfill",
Aggregation: telemetry.DefaultCountDistribution,
}
)
// BindService creates the frontend service and binds it to the serving harness.
@ -56,6 +70,8 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
b.RegisterViews(
totalBytesPerTicketView,
searchFieldsPerTicketView,
totalBytesPerBackfillView,
searchFieldsPerBackfillView,
)
return nil
}

@ -17,15 +17,15 @@ package frontend
import (
"context"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
"github.com/rs/xid"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
@ -72,7 +72,7 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
}
ticket.Id = xid.New().String()
ticket.CreateTime = ptypes.TimestampNow()
ticket.CreateTime = timestamppb.Now()
sfCount := 0
sfCount += len(ticket.GetSearchFields().GetDoubleArgs())
@ -94,16 +94,148 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
return ticket, nil
}
// CreateBackfill creates a new Backfill object.
// It assigns a unique ID to the input Backfill and records it in state storage.
// The initial LastAcknowledge time is set for the Backfill.
// A Backfill is considered ready for matchmaking as soon as it is created.
// - If SearchFields exist in a Backfill, CreateBackfill will also index these fields so that the backfill can be queried with the query.QueryBackfills function.
func (s *frontendService) CreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest) (*pb.Backfill, error) {
// Perform input validation.
if req == nil {
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
}
if req.Backfill == nil {
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
}
if req.Backfill.CreateTime != nil {
return nil, status.Errorf(codes.InvalidArgument, "backfills cannot be created with create time set")
}
return doCreateBackfill(ctx, req, s.store)
}
func doCreateBackfill(ctx context.Context, req *pb.CreateBackfillRequest, store statestore.Service) (*pb.Backfill, error) {
// Generate an id and create a Backfill in state storage
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
if !ok {
return nil, status.Error(codes.Internal, "failed to clone input ticket proto")
}
backfill.Id = xid.New().String()
backfill.CreateTime = timestamppb.Now()
backfill.Generation = 1
sfCount := 0
sfCount += len(backfill.GetSearchFields().GetDoubleArgs())
sfCount += len(backfill.GetSearchFields().GetStringArgs())
sfCount += len(backfill.GetSearchFields().GetTags())
stats.Record(ctx, searchFieldsPerBackfill.M(int64(sfCount)))
stats.Record(ctx, totalBytesPerBackfill.M(int64(proto.Size(backfill))))
err := store.CreateBackfill(ctx, backfill, []string{})
if err != nil {
return nil, err
}
err = store.IndexBackfill(ctx, backfill)
if err != nil {
return nil, err
}
return backfill, nil
}
// UpdateBackfill updates a Backfill object, if present.
// An update increments the backfill's generation in Redis.
// Only Extensions and SearchFields are updated.
// CreateTime is not changed by an update.
func (s *frontendService) UpdateBackfill(ctx context.Context, req *pb.UpdateBackfillRequest) (*pb.Backfill, error) {
if req == nil {
return nil, status.Errorf(codes.InvalidArgument, "request is nil")
}
if req.Backfill == nil {
return nil, status.Errorf(codes.InvalidArgument, ".backfill is required")
}
backfill, ok := proto.Clone(req.Backfill).(*pb.Backfill)
if !ok {
return nil, status.Error(codes.Internal, "failed to clone input backfill proto")
}
bfID := backfill.Id
if bfID == "" {
return nil, status.Error(codes.InvalidArgument, "backfill ID should exist")
}
m := s.store.NewMutex(bfID)
err := m.Lock(ctx)
if err != nil {
return nil, err
}
defer func() {
if _, err = m.Unlock(context.Background()); err != nil {
logger.WithError(err).Error("error on mutex unlock")
}
}()
bfStored, associatedTickets, err := s.store.GetBackfill(ctx, bfID)
if err != nil {
return nil, err
}
// Update the generation here, because the Frontend is used by the GameServer only
bfStored.SearchFields = backfill.SearchFields
bfStored.Extensions = backfill.Extensions
bfStored.PersistentField = backfill.PersistentField
// Auto-increment the generation; validation of the input backfill's generation
// is performed on the Backend only (after an MMF round)
bfStored.Generation++
err = s.store.UpdateBackfill(ctx, bfStored, []string{})
if err != nil {
return nil, err
}
err = s.store.DeleteTicketsFromPendingRelease(ctx, associatedTickets)
if err != nil {
return nil, err
}
err = s.store.IndexBackfill(ctx, bfStored)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": bfStored.Id,
}).Error("failed to index the backfill")
return nil, err
}
return bfStored, nil
}
// DeleteBackfill deletes a Backfill by its ID.
func (s *frontendService) DeleteBackfill(ctx context.Context, req *pb.DeleteBackfillRequest) (*emptypb.Empty, error) {
bfID := req.GetBackfillId()
if bfID == "" {
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
}
err := s.store.DeleteBackfillCompletely(ctx, bfID)
// Deletion of a Backfill is inevitable once it expires, so the error is logged rather than returned here
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
}).Error("error on DeleteBackfill")
}
return &emptypb.Empty{}, nil
}
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
//
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*empty.Empty, error) {
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*emptypb.Empty, error) {
err := doDeleteTicket(ctx, req.GetTicketId(), s.store)
if err != nil {
return nil, err
}
return &empty.Empty{}, nil
return &emptypb.Empty{}, nil
}
func doDeleteTicket(ctx context.Context, id string, store statestore.Service) error {
@ -147,36 +279,113 @@ func (s *frontendService) GetTicket(ctx context.Context, req *pb.GetTicketReques
// - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
func (s *frontendService) WatchAssignments(req *pb.WatchAssignmentsRequest, stream pb.FrontendService_WatchAssignmentsServer) error {
ctx := stream.Context()
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
sender := func(assignment *pb.Assignment) error {
return stream.Send(&pb.WatchAssignmentsResponse{Assignment: assignment})
}
return doWatchAssignments(ctx, req.GetTicketId(), sender, s.store)
}
sender := func(assignment *pb.Assignment) error {
return stream.Send(&pb.WatchAssignmentsResponse{Assignment: assignment})
}
return doWatchAssignments(ctx, req.GetTicketId(), sender, s.store)
}
func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignment) error, store statestore.Service) error {
var currAssignment *pb.Assignment
var ok bool
callback := func(assignment *pb.Assignment) error {
if (currAssignment == nil && assignment != nil) || !proto.Equal(currAssignment, assignment) {
currAssignment, ok = proto.Clone(assignment).(*pb.Assignment)
if !ok {
return status.Error(codes.Internal, "failed to cast the assignment object")
select {
case <-ctx.Done():
return status.Errorf(codes.Aborted, ctx.Err().Error())
default:
if ctx.Err() != nil {
return status.Errorf(codes.Aborted, ctx.Err().Error())
}
err := sender(currAssignment)
if err != nil {
return status.Errorf(codes.Aborted, err.Error())
if (currAssignment == nil && assignment != nil) || !proto.Equal(currAssignment, assignment) {
currAssignment, ok = proto.Clone(assignment).(*pb.Assignment)
if !ok {
return status.Error(codes.Internal, "failed to cast the assignment object")
}
err := sender(currAssignment)
if err != nil {
return status.Errorf(codes.Aborted, err.Error())
}
}
return nil
}
return nil
}
return store.GetAssignments(ctx, id, callback)
}
// AcknowledgeBackfill is used to notify OpenMatch about GameServer connection info.
// This triggers an assignment process.
func (s *frontendService) AcknowledgeBackfill(ctx context.Context, req *pb.AcknowledgeBackfillRequest) (*pb.AcknowledgeBackfillResponse, error) {
if req.GetBackfillId() == "" {
return nil, status.Errorf(codes.InvalidArgument, ".BackfillId is required")
}
if req.GetAssignment() == nil {
return nil, status.Errorf(codes.InvalidArgument, ".Assignment is required")
}
m := s.store.NewMutex(req.GetBackfillId())
err := m.Lock(ctx)
if err != nil {
return nil, err
}
defer func() {
if _, err = m.Unlock(context.Background()); err != nil {
logger.WithError(err).Error("error on mutex unlock")
}
}()
bf, associatedTickets, err := s.store.GetBackfill(ctx, req.GetBackfillId())
if err != nil {
return nil, err
}
err = s.store.UpdateAcknowledgmentTimestamp(ctx, req.GetBackfillId())
if err != nil {
return nil, err
}
resp := &pb.AcknowledgeBackfillResponse{
Backfill: bf,
Tickets: make([]*pb.Ticket, 0),
}
if len(associatedTickets) != 0 {
setResp, tickets, err := s.store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{{TicketIds: associatedTickets, Assignment: req.GetAssignment()}},
})
if err != nil {
return nil, err
}
resp.Tickets = tickets
// log errors returned from UpdateAssignments to track tickets with NotFound errors
for _, f := range setResp.Failures {
logger.Errorf("failed to assign ticket %s, cause %d", f.TicketId, f.Cause)
}
for _, id := range associatedTickets {
err = s.store.DeindexTicket(ctx, id)
// Try to deindex all input tickets. Log without returning an error if the deindexing operation failed.
if err != nil {
logger.WithError(err).Errorf("failed to deindex ticket %s after updating the assignments", id)
}
}
// Clear the backfill's ticket associations, since the assignment above is the tickets' final state
err = s.store.UpdateBackfill(ctx, bf, []string{})
if err != nil {
return nil, err
}
}
return resp, nil
}
// GetBackfill fetches a Backfill object by its ID.
func (s *frontendService) GetBackfill(ctx context.Context, req *pb.GetBackfillRequest) (*pb.Backfill, error) {
bf, _, err := s.store.GetBackfill(ctx, req.GetBackfillId())
return bf, err
}

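Taken together, the new frontend RPCs give a game server a simple backfill lifecycle: create, keep alive via acknowledge, then delete. A hedged sketch of that flow — the connection string and search fields are placeholders, and the usual context/pb imports are assumed:

func backfillLifecycle(ctx context.Context, fe pb.FrontendServiceClient) error {
	// Create a backfill advertising an open slot.
	bf, err := fe.CreateBackfill(ctx, &pb.CreateBackfillRequest{
		Backfill: &pb.Backfill{
			SearchFields: &pb.SearchFields{StringArgs: map[string]string{"mode": "ctf"}},
		},
	})
	if err != nil {
		return err
	}

	// Periodically acknowledge the backfill so it is not treated as expired,
	// passing the server's connection info for any newly matched tickets.
	if _, err = fe.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{
		BackfillId: bf.Id,
		Assignment: &pb.Assignment{Connection: "10.0.0.1:7777"},
	}); err != nil {
		return err
	}

	// Delete the backfill once the match is full.
	_, err = fe.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: bf.Id})
	return err
}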
@ -26,6 +26,7 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"open-match.dev/open-match/internal/statestore"
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
@ -81,13 +82,190 @@ func TestDoCreateTickets(t *testing.T) {
if err == nil {
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetId())
require.True(t, matched)
require.Nil(t, err)
require.NoError(t, err)
require.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
}
})
}
}
func TestCreateBackfill(t *testing.T) {
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
ctx := utilTesting.NewContext(t)
fs := frontendService{cfg: cfg, store: store}
var testCases = []struct {
description string
request *pb.CreateBackfillRequest
result *pb.Backfill
expectedCode codes.Code
expectedMessage string
}{
{
description: "nil request check",
request: nil,
expectedCode: codes.InvalidArgument,
expectedMessage: "request is nil",
},
{
description: "nil backfill - error is returned",
request: &pb.CreateBackfillRequest{Backfill: nil},
expectedCode: codes.InvalidArgument,
expectedMessage: ".backfill is required",
},
{
description: "createTime should not exist in input",
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{CreateTime: timestamppb.Now()}},
expectedCode: codes.InvalidArgument,
expectedMessage: "backfills cannot be created with create time set",
},
{
description: "empty Backfill, no errors",
request: &pb.CreateBackfillRequest{Backfill: &pb.Backfill{}},
expectedCode: codes.OK,
expectedMessage: "",
},
{
description: "normal backfill",
request: &pb.CreateBackfillRequest{
Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
}}}},
expectedCode: codes.OK,
expectedMessage: "",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
res, err := fs.CreateBackfill(ctx, tc.request)
if tc.expectedCode == codes.OK {
require.NoError(t, err)
require.NotNil(t, res)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
}
})
}
// expect error with canceled context
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fs = frontendService{cfg: cfg, store: store}
ctx, cancel := context.WithCancel(context.Background())
cancel()
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}})
require.NotNil(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Nil(t, res)
}
func TestUpdateBackfill(t *testing.T) {
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
ctx := utilTesting.NewContext(t)
fs := frontendService{cfg: cfg, store: store}
res, err := fs.CreateBackfill(ctx, &pb.CreateBackfillRequest{
Backfill: &pb.Backfill{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
},
},
},
})
require.NoError(t, err)
require.NotNil(t, res)
var testCases = []struct {
description string
request *pb.UpdateBackfillRequest
result *pb.Backfill
expectedCode codes.Code
expectedMessage string
}{
{
description: "nil request check",
request: nil,
expectedCode: codes.InvalidArgument,
expectedMessage: "request is nil",
},
{
description: "nil backfill - error is returned",
request: &pb.UpdateBackfillRequest{Backfill: nil},
expectedCode: codes.InvalidArgument,
expectedMessage: ".backfill is required",
},
{
description: "empty Backfill, error with no backfill ID",
request: &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{}},
expectedCode: codes.InvalidArgument,
expectedMessage: "backfill ID should exist",
},
{
description: "normal backfill",
request: &pb.UpdateBackfillRequest{
Backfill: &pb.Backfill{
Id: res.Id,
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"search": "me",
}}}},
expectedCode: codes.OK,
expectedMessage: "",
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
res, err = fs.UpdateBackfill(ctx, tc.request)
if tc.expectedCode == codes.OK {
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, tc.request.Backfill.SearchFields.StringArgs, res.SearchFields.StringArgs)
} else {
require.Error(t, err)
require.Equal(t, tc.expectedCode.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), tc.expectedMessage)
}
})
}
// expect error with canceled context
store, closer = statestoreTesting.NewStoreServiceForTesting(t, cfg)
fs = frontendService{cfg: cfg, store: store}
defer closer()
ctx, cancel := context.WithCancel(context.Background())
cancel()
res, err = fs.UpdateBackfill(ctx, &pb.UpdateBackfillRequest{Backfill: &pb.Backfill{
Id: res.Id,
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}})
require.NotNil(t, err)
require.Equal(t, codes.Unknown.String(), status.Convert(err).Code().String())
require.Nil(t, res)
}
func TestDoWatchAssignments(t *testing.T) {
testTicket := &pb.Ticket{
Id: "test-id",
@ -131,7 +309,7 @@ func TestDoWatchAssignments(t *testing.T) {
},
},
})
require.Nil(t, err)
require.NoError(t, err)
wg.Done()
}
}(wg)
@ -165,6 +343,89 @@ func TestDoWatchAssignments(t *testing.T) {
}
}
// TestAcknowledgeBackfillValidation - test input validation only
func TestAcknowledgeBackfillValidation(t *testing.T) {
cfg := viper.New()
tests := []struct {
description string
request *pb.AcknowledgeBackfillRequest
expectedMessage string
}{
{
description: "no BackfillId, error is expected",
request: &pb.AcknowledgeBackfillRequest{BackfillId: "", Assignment: &pb.Assignment{Connection: "10.0.0.1"}},
expectedMessage: ".BackfillId is required",
},
{
description: "no Assignment, error is expected",
request: &pb.AcknowledgeBackfillRequest{BackfillId: "1234", Assignment: nil},
expectedMessage: ".Assignment is required",
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fs := frontendService{cfg: cfg, store: store}
bf, err := fs.AcknowledgeBackfill(ctx, test.request)
require.Equal(t, codes.InvalidArgument.String(), status.Convert(err).Code().String())
require.Equal(t, test.expectedMessage, status.Convert(err).Message())
require.Nil(t, bf)
})
}
}
// TestAcknowledgeBackfill verifies the timestamp part of the AcknowledgeBackfill call;
// the assignment part is tested in a corresponding E2E test.
// An expired backfill cannot be acknowledged.
func TestAcknowledgeBackfill(t *testing.T) {
cfg := viper.New()
ctx := context.Background()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
err := store.CreateBackfill(ctx, fakeBackfill, []string{})
require.NoError(t, err)
fs := frontendService{cfg: cfg, store: store}
resp, err := fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.NoError(t, err)
require.NotNil(t, resp)
require.NotNil(t, resp.Backfill)
require.NotNil(t, resp.Tickets)
// Use wrong BackfillID, error is returned
resp, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: "42", Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.Error(t, err)
require.Nil(t, resp)
require.Equal(t, "Backfill id: 42 not found", status.Convert(err).Message())
time.Sleep(cfg.GetDuration("pendingReleaseTimeout"))
ids, err := store.GetExpiredBackfillIDs(ctx)
require.NoError(t, err)
require.Len(t, ids, 1)
resp, err = fs.AcknowledgeBackfill(ctx, &pb.AcknowledgeBackfillRequest{BackfillId: fakeBackfill.Id, Assignment: &pb.Assignment{Connection: "10.0.0.1"}})
require.Nil(t, resp)
require.Error(t, err)
require.Equal(t, codes.Unavailable.String(), status.Convert(err).Code().String())
require.Contains(t, status.Convert(err).Message(), "can not acknowledge an expired backfill, id: 1")
}
func TestDoDeleteTicket(t *testing.T) {
fakeTicket := &pb.Ticket{
Id: "1",
@ -274,3 +535,109 @@ func TestDoGetTicket(t *testing.T) {
})
}
}
func TestGetBackfill(t *testing.T) {
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
cfg := viper.New()
tests := []struct {
description string
preAction func(context.Context, context.CancelFunc, statestore.Service)
wantTicket *pb.Backfill
wantCode codes.Code
}{
{
description: "expect unavailable code since context is canceled before being called",
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
cancel()
},
wantCode: codes.Unavailable,
},
{
description: "expect not found code since ticket does not exist",
preAction: func(_ context.Context, _ context.CancelFunc, _ statestore.Service) {},
wantCode: codes.NotFound,
},
{
description: "expect ok code with output ticket equivalent to fakeBackfill",
preAction: func(ctx context.Context, _ context.CancelFunc, store statestore.Service) {
store.CreateBackfill(ctx, fakeBackfill, []string{})
},
wantCode: codes.OK,
wantTicket: fakeBackfill,
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
defer closer()
fs := frontendService{cfg: cfg, store: store}
test.preAction(ctx, cancel, store)
backfill, err := fs.GetBackfill(ctx, &pb.GetBackfillRequest{BackfillId: fakeBackfill.GetId()})
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
if err == nil {
require.Equal(t, test.wantTicket.GetId(), backfill.GetId())
require.Equal(t, test.wantTicket.SearchFields.DoubleArgs, backfill.SearchFields.DoubleArgs)
}
})
}
}
func TestDoDeleteBackfill(t *testing.T) {
fakeBackfill := &pb.Backfill{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"test-arg": 1,
},
},
}
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
defer closer()
ctx := context.Background()
err := store.CreateBackfill(ctx, fakeBackfill, []string{})
require.NoError(t, err)
cfg := viper.New()
fs := frontendService{cfg: cfg, store: store}
tests := []struct {
description string
id string
wantCode codes.Code
}{
{
description: "expect ok code since delete backfill does not care about if backfill exists or not",
id: "222",
wantCode: codes.OK,
},
{
description: "expect ok code",
id: "1",
wantCode: codes.OK,
},
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
_, err := fs.DeleteBackfill(ctx, &pb.DeleteBackfillRequest{BackfillId: test.id})
require.NoError(t, err)
require.Equal(t, test.wantCode.String(), status.Convert(err).Code().String())
})
}
}

internal/app/query/cache.go (new file)

@ -0,0 +1,253 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
// cache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type cache struct {
store statestore.Service
requests chan *cacheRequest
// Single item buffered channel. Holds a value when runQuery can be safely
// started. Basically a channel/select friendly mutex around runQuery
// running.
startRunRequest chan struct{}
wg sync.WaitGroup
// Multithreaded unsafe fields, only to be written by update, and read when
// request given the ok.
value interface{}
update func(statestore.Service, interface{}) error
err error
}
type cacheRequest struct {
ctx context.Context
runNow chan struct{}
}
func (c *cache) request(ctx context.Context, f func(interface{})) error {
cr := &cacheRequest{
ctx: ctx,
runNow: make(chan struct{}),
}
sendRequest:
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "cache request canceled before request sent.")
case <-c.startRunRequest:
go c.runRequest()
case c.requests <- cr:
break sendRequest
}
}
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "cache request canceled waiting for access.")
case <-cr.runNow:
defer c.wg.Done()
}
if c.err != nil {
return c.err
}
f(c.value)
return nil
}
func (c *cache) runRequest() {
defer func() {
c.startRunRequest <- struct{}{}
}()
// Wait for first query request.
reqs := []*cacheRequest{<-c.requests}
// Collect all waiting queries.
collectAllWaiting:
for {
select {
case req := <-c.requests:
reqs = append(reqs, req)
default:
break collectAllWaiting
}
}
c.err = c.update(c.store, c.value)
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
// Send WaitGroup to query calls, letting them run their query on the cache.
for _, req := range reqs {
c.wg.Add(1)
select {
case req.runNow <- struct{}{}:
case <-req.ctx.Done():
c.wg.Done()
}
}
// wait for requests to finish using cache.
c.wg.Wait()
}
func newTicketCache(b *appmain.Bindings, store statestore.Service) *cache {
c := &cache{
store: store,
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
value: make(map[string]*pb.Ticket),
update: updateTicketCache,
}
c.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(c.store.HealthCheck)
return c
}
func updateTicketCache(store statestore.Service, value interface{}) error {
if value == nil {
return status.Error(codes.InvalidArgument, "value is required")
}
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Ticket, but got: %T", value)
}
t := time.Now()
previousCount := len(tickets)
currentAll, err := store.GetIndexedIDSet(context.Background())
if err != nil {
return err
}
deletedCount := 0
for id := range tickets {
if _, ok := currentAll[id]; !ok {
delete(tickets, id)
deletedCount++
}
}
toFetch := []string{}
for id := range currentAll {
if _, ok := tickets[id]; !ok {
toFetch = append(toFetch, id)
}
}
newTickets, err := store.GetTickets(context.Background(), toFetch)
if err != nil {
return err
}
for _, t := range newTickets {
tickets[t.Id] = t
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), totalActiveTickets.M(int64(len(currentAll))))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
stats.Record(context.Background(), totalPendingTickets.M(int64(len(toFetch))))
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tickets))
return nil
}
func newBackfillCache(b *appmain.Bindings, store statestore.Service) *cache {
c := &cache{
store: store,
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
value: make(map[string]*pb.Backfill),
update: updateBackfillCache,
}
c.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(c.store.HealthCheck)
return c
}
func updateBackfillCache(store statestore.Service, value interface{}) error {
if value == nil {
return status.Error(codes.InvalidArgument, "value is required")
}
backfills, ok := value.(map[string]*pb.Backfill)
if !ok {
return status.Errorf(codes.InvalidArgument, "expecting value type map[string]*pb.Backfill, but got: %T", value)
}
t := time.Now()
previousCount := len(backfills)
index, err := store.GetIndexedBackfills(context.Background())
if err != nil {
return err
}
deletedCount := 0
for id, backfill := range backfills {
generation, ok := index[id]
if !ok || backfill.Generation < int64(generation) {
delete(backfills, id)
deletedCount++
}
}
toFetch := []string{}
for id := range index {
if _, ok := backfills[id]; !ok {
toFetch = append(toFetch, id)
}
}
fetchedBackfills, err := store.GetBackfills(context.Background(), toFetch)
if err != nil {
return err
}
for _, b := range fetchedBackfills {
backfills[b.Id] = b
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), totalBackfillsTickets.M(int64(len(backfills))))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(t))/float64(time.Millisecond)))
logger.Debugf("Backfill Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(backfills))
return nil
}

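The cache above coalesces every request that arrives while an update is in flight into a single store read. A stripped-down, self-contained sketch of that request-coalescing idea, using a plain mutex in place of the one-slot startRunRequest channel:

package main

import (
	"fmt"
	"sync"
	"time"
)

// coalescer runs update once per batch of concurrent Get calls.
type coalescer struct {
	mu      sync.Mutex
	waiters []chan int
	value   int
}

func (c *coalescer) Get() int {
	ch := make(chan int, 1)
	c.mu.Lock()
	first := len(c.waiters) == 0
	c.waiters = append(c.waiters, ch)
	c.mu.Unlock()
	if first {
		go c.update() // the first waiter triggers the shared update
	}
	return <-ch
}

func (c *coalescer) update() {
	time.Sleep(10 * time.Millisecond) // stands in for one expensive store read
	c.mu.Lock()
	c.value++
	for _, ch := range c.waiters {
		ch <- c.value // every waiter in this batch sees the same value
	}
	c.waiters = nil
	c.mu.Unlock()
}

func main() {
	c := &coalescer{}
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		// Goroutines that land in the same batch print the same value.
		go func() { defer wg.Done(); fmt.Println(c.Get()) }()
	}
	wg.Wait()
}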
@ -19,16 +19,21 @@ import (
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of tickets query service cached", stats.UnitDimensionless)
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
totalActiveTickets = stats.Int64("open-match.dev/query/total_active_tickets", "Number of active tickets", stats.UnitDimensionless)
backfillsPerQuery = stats.Int64("open-match.dev/query/backfills_per_query", "Number of backfills per query", stats.UnitDimensionless)
totalBackfillsTickets = stats.Int64("open-match.dev/query/total_backfill_tickets", "Number of current backfills", stats.UnitDimensionless)
totalPendingTickets = stats.Int64("open-match.dev/query/tickets_pending_release", "Number of tickets pending release", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of items query service cached", stats.UnitDimensionless)
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
ticketsPerQueryView = &view.View{
Measure: ticketsPerQuery,
@ -36,10 +41,34 @@ var (
Description: "Tickets per query",
Aggregation: telemetry.DefaultCountDistribution,
}
ticketsActiveTotalView = &view.View{
Measure: totalActiveTickets,
Name: "open-match.dev/query/total_active_tickets",
Description: "Total tickets",
Aggregation: view.LastValue(),
}
backfillsPerQueryView = &view.View{
Measure: backfillsPerQuery,
Name: "open-match.dev/query/backfills_per_query",
Description: "Backfills per query",
Aggregation: telemetry.DefaultCountDistribution,
}
backfillTotalTicketsView = &view.View{
Measure: totalBackfillsTickets,
Name: "open-match.dev/query/total_backfill_tickets",
Description: "Total number of backfill tickets",
Aggregation: view.LastValue(),
}
pendingTotalTicketsView = &view.View{
Measure: totalPendingTickets,
Name: "open-match.dev/query/total_pending_tickets",
Description: "Total number of pending tickets",
Aggregation: view.LastValue(),
}
cacheTotalItemsView = &view.View{
Measure: cacheTotalItems,
Name: "open-match.dev/query/total_cached_items",
Description: "Total number of cached tickets",
Description: "Total number of cached items",
Aggregation: view.LastValue(),
}
cacheFetchedItemsView = &view.View{
@ -70,9 +99,11 @@ var (
// BindService creates the query service and binds it to the serving harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
store := statestore.New(p.Config())
service := &queryService{
cfg: p.Config(),
tc: newTicketCache(b, p.Config()),
tc: newTicketCache(b, store),
bc: newBackfillCache(b, store),
}
b.AddHandleFunc(func(s *grpc.Server) {
@ -80,6 +111,10 @@ func BindService(p *appmain.Params, b *appmain.Bindings) error {
}, pb.RegisterQueryServiceHandlerFromEndpoint)
b.RegisterViews(
ticketsPerQueryView,
ticketsActiveTotalView,
backfillsPerQueryView,
backfillTotalTicketsView,
pendingTotalTicketsView,
cacheTotalItemsView,
cacheUpdateView,
cacheFetchedItemsView,

@ -15,20 +15,14 @@
package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/filter"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
@ -40,10 +34,11 @@ var (
)
// queryService API provides utility functions for common MMF functionality such
// as retreiving Tickets from state storage.
// as retrieving Tickets from state storage.
type queryService struct {
cfg config.View
tc *ticketCache
tc *cache
bc *cache
}
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
@ -59,7 +54,13 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
}
var results []*pb.Ticket
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
err = s.tc.request(ctx, func(value interface{}) {
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
return
}
for _, ticket := range tickets {
if pf.In(ticket) {
results = append(results, ticket)
@ -103,7 +104,13 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
}
var results []string
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
err = s.tc.request(ctx, func(value interface{}) {
tickets, ok := value.(map[string]*pb.Ticket)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Ticket, but got: %T", value)
return
}
for id, ticket := range tickets {
if pf.In(ticket) {
results = append(results, id)
@ -134,6 +141,56 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
return nil
}
func (s *queryService) QueryBackfills(req *pb.QueryBackfillsRequest, responseServer pb.QueryService_QueryBackfillsServer) error {
ctx := responseServer.Context()
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
}
pf, err := filter.NewPoolFilter(pool)
if err != nil {
return err
}
var results []*pb.Backfill
err = s.bc.request(ctx, func(value interface{}) {
backfills, ok := value.(map[string]*pb.Backfill)
if !ok {
logger.Errorf("expecting value type map[string]*pb.Backfill, but got: %T", value)
return
}
for _, backfill := range backfills {
if pf.In(backfill) {
results = append(results, backfill)
}
}
})
if err != nil {
err = errors.Wrap(err, "QueryBackfills: failed to run request")
return err
}
stats.Record(ctx, backfillsPerQuery.M(int64(len(results))))
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
end := start + pSize
if end > len(results) {
end = len(results)
}
err := responseServer.Send(&pb.QueryBackfillsResponse{
Backfills: results[start:end],
})
if err != nil {
return err
}
}
return nil
}
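
From the client side, QueryBackfills is a server-streaming RPC paged by queryPageSize. A minimal sketch of draining the stream, assuming the standard io and pb imports and a caller-supplied pool:

func queryBackfills(ctx context.Context, qc pb.QueryServiceClient, pool *pb.Pool) ([]*pb.Backfill, error) {
	stream, err := qc.QueryBackfills(ctx, &pb.QueryBackfillsRequest{Pool: pool})
	if err != nil {
		return nil, err
	}
	var all []*pb.Backfill
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return all, nil // the server has sent every page
		}
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Backfills...)
	}
}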
func getPageSize(cfg config.View) int {
const (
name = "queryPageSize"
@ -165,159 +222,3 @@ func getPageSize(cfg config.View) int {
return pSize
}
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
// ticketCache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type ticketCache struct {
store statestore.Service
requests chan *cacheRequest
// Single item buffered channel. Holds a value when runQuery can be safely
// started. Basically a channel/select friendly mutex around runQuery
// running.
startRunRequest chan struct{}
wg sync.WaitGroup
// Multithreaded unsafe fields, only to be written by update, and read when
// request given the ok.
tickets map[string]*pb.Ticket
err error
}
func newTicketCache(b *appmain.Bindings, cfg config.View) *ticketCache {
tc := &ticketCache{
store: statestore.New(cfg),
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
tickets: make(map[string]*pb.Ticket),
}
tc.startRunRequest <- struct{}{}
b.AddHealthCheckFunc(tc.store.HealthCheck)
return tc
}
type cacheRequest struct {
ctx context.Context
runNow chan struct{}
}
func (tc *ticketCache) request(ctx context.Context, f func(map[string]*pb.Ticket)) error {
cr := &cacheRequest{
ctx: ctx,
runNow: make(chan struct{}),
}
sendRequest:
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled before reuest sent.")
case <-tc.startRunRequest:
go tc.runRequest()
case tc.requests <- cr:
break sendRequest
}
}
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled waiting for access.")
case <-cr.runNow:
defer tc.wg.Done()
}
if tc.err != nil {
return tc.err
}
f(tc.tickets)
return nil
}
func (tc *ticketCache) runRequest() {
defer func() {
tc.startRunRequest <- struct{}{}
}()
// Wait for first query request.
reqs := []*cacheRequest{<-tc.requests}
// Collect all waiting queries.
collectAllWaiting:
for {
select {
case req := <-tc.requests:
reqs = append(reqs, req)
default:
break collectAllWaiting
}
}
tc.update()
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
// Send WaitGroup to query calls, letting them run their query on the ticket
// cache.
for _, req := range reqs {
tc.wg.Add(1)
select {
case req.runNow <- struct{}{}:
case <-req.ctx.Done():
tc.wg.Done()
}
}
// wait for requests to finish using ticket cache.
tc.wg.Wait()
}
func (tc *ticketCache) update() {
st := time.Now()
previousCount := len(tc.tickets)
currentAll, err := tc.store.GetIndexedIDSet(context.Background())
if err != nil {
tc.err = err
return
}
deletedCount := 0
for id := range tc.tickets {
if _, ok := currentAll[id]; !ok {
delete(tc.tickets, id)
deletedCount++
}
}
toFetch := []string{}
for id := range currentAll {
if _, ok := tc.tickets[id]; !ok {
toFetch = append(toFetch, id)
}
}
newTickets, err := tc.store.GetTickets(context.Background(), toFetch)
if err != nil {
tc.err = err
return
}
for _, t := range newTickets {
tc.tickets[t.Id] = t
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(st))/float64(time.Millisecond)))
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
tc.err = nil
}

@ -51,7 +51,7 @@ var (
// -> m2c ->
// remember return channel m7c for match | fanInFanOut
// -> m3c ->
// setmappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
// set mappings from matchIDs to ticketIDs| cacheMatchIDToTicketIDs
// -> m4c -> (buffered)
// send to evaluator | wrapEvaluator
// -> m5c -> (buffered)
@ -113,7 +113,7 @@ func (s *synchronizerService) Synchronize(stream ipb.Synchronizer_SynchronizeSer
registration.allM1cSent.Done()
return
}
registration.m1c.send(mAndM6c{m: req.Proposal, m7c: registration.m7c})
registration.m1c.send(mAndM7c{m: req.Proposal, m7c: registration.m7c})
}
}()
@ -212,7 +212,7 @@ func (s *synchronizerService) runCycle() {
/////////////////////////////////////// Initialize cycle
ctx, cancel := contextcause.WithCancelCause(context.Background())
m2c := make(chan mAndM6c)
m2c := make(chan mAndM7c)
m3c := make(chan *pb.Match)
m4c := make(chan *pb.Match)
m5c := make(chan string)
@ -289,17 +289,24 @@ Registration:
r.cancelMmfs <- struct{}{}
}
})
<-closedOnCycleEnd
stats.Record(ctx, iterationLatency.M(float64(time.Since(cst)/time.Millisecond)))
// Clean up in case it was never needed.
cancelProposalCollection.Stop()
err := s.store.CleanupBackfills(ctx)
if err != nil {
logger.Errorf("Failed to clean up backfills, %s", err.Error())
}
}
///////////////////////////////////////
///////////////////////////////////////
type mAndM6c struct {
type mAndM7c struct {
m *pb.Match
m7c chan string
}
@ -309,10 +316,10 @@ type mAndM6c struct {
// This channel is remembered in a map, and the match is passed to be evaluated.
// When a match returns from evaluation, it's ID is looked up in the map and the
// match is returned on that channel.
func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
m6cMap := make(map[string]chan<- string)
func fanInFanOut(m2c <-chan mAndM7c, m3c chan<- *pb.Match, m6c <-chan string) {
m7cMap := make(map[string]chan<- string)
defer func(m2c <-chan mAndM6c) {
defer func(m2c <-chan mAndM7c) {
for range m2c {
}
}(m2c)
@ -321,7 +328,7 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
select {
case m2, ok := <-m2c:
if ok {
m6cMap[m2.m.GetMatchId()] = m2.m7c
m7cMap[m2.m.GetMatchId()] = m2.m7c
m3c <- m2.m
} else {
close(m3c)
@ -334,7 +341,7 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
return
}
m7c, ok := m6cMap[m5]
m7c, ok := m7cMap[m5]
if ok {
m7c <- m5
} else {
@ -350,8 +357,8 @@ func fanInFanOut(m2c <-chan mAndM6c, m3c chan<- *pb.Match, m6c <-chan string) {
///////////////////////////////////////
type cutoffSender struct {
m1c chan<- mAndM6c
m2c chan<- mAndM6c
m1c chan<- mAndM7c
m2c chan<- mAndM7c
closed chan struct{}
closeOnce sync.Once
}
@ -359,8 +366,8 @@ type cutoffSender struct {
// cutoffSender allows values to be passed on the provided channel until cutoff
// has been called. This closed the provided channel. Calls to send after
// cutoff work, but values are ignored.
func newCutoffSender(m2c chan<- mAndM6c) *cutoffSender {
m1c := make(chan mAndM6c)
func newCutoffSender(m2c chan<- mAndM7c) *cutoffSender {
m1c := make(chan mAndM7c)
c := &cutoffSender{
m1c: m1c,
m2c: m2c,
@ -383,7 +390,7 @@ func newCutoffSender(m2c chan<- mAndM6c) *cutoffSender {
}
// send passes the value on the channel if still open, otherwise does nothing.
func (c *cutoffSender) send(match mAndM6c) {
func (c *cutoffSender) send(match mAndM7c) {
select {
case <-c.closed:
case c.m1c <- match:
@ -436,7 +443,7 @@ func getTicketIds(tickets []*pb.Ticket) []string {
// Calls statestore to add all of the tickets returned by the evaluator to the
// pendingRelease list. If it partially fails for whatever reason (not all tickets will
// nessisarily be in the same call), only the matches which can be safely
// necessarily be in the same call), only the matches which can be safely
// returned to the Synchronize calls are.
func (s *synchronizerService) addMatchesToPendingRelease(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
totalMatches := 0

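The renamed cutoffSender guards its channel with a separate closed signal, so sends after cutoff become silent no-ops instead of panics on a closed channel. A self-contained sketch of the same gate pattern:

package main

import (
	"fmt"
	"sync"
)

type gate struct {
	in        chan string
	closed    chan struct{}
	closeOnce sync.Once
}

// newGate forwards values from the gate to out until cutoff is called,
// then closes out and drains any late senders.
func newGate(out chan<- string) *gate {
	g := &gate{in: make(chan string), closed: make(chan struct{})}
	go func() {
		for {
			select {
			case <-g.closed:
				close(out)
				for range g.in { // keep draining so late senders never block
				}
				return
			case v := <-g.in:
				out <- v
			}
		}
	}()
	return g
}

// send passes v if the gate is still open; after cutoff the value is dropped.
func (g *gate) send(v string) {
	select {
	case <-g.closed:
	case g.in <- v:
	}
}

// cutoff closes the gate; safe to call more than once.
func (g *gate) cutoff() {
	g.closeOnce.Do(func() { close(g.closed) })
}

func main() {
	out := make(chan string, 4)
	g := newGate(out)
	g.send("before")
	g.cutoff()
	g.send("after") // dropped, no panic
	for v := range out {
		fmt.Println(v) // prints "before"
	}
}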
@ -133,7 +133,7 @@ var getTests = []struct {
},
}
//nolint: gocritic, staticcheck
// nolint: gocritic, staticcheck
func Test_Get(t *testing.T) {
for _, tt := range getTests {
tt := tt

@ -58,6 +58,30 @@ func Read() (*viper.Viper, error) {
return nil, fmt.Errorf("fatal error reading override config file, desc: %s", err.Error())
}
if !cfg.IsSet("registrationInterval") {
log.Printf("config: registrationInterval is not set in matchmaker_config_override.yaml")
}
if !cfg.IsSet("proposalCollectionInterval") {
log.Printf("config: proposalCollectionInterval is not set in matchmaker_config_override.yaml")
}
if !cfg.IsSet("pendingReleaseTimeout") {
log.Printf("config: pendingReleaseTimeout is not set in matchmaker_config_override.yaml")
}
if !cfg.IsSet("assignedDeleteTimeout") {
log.Printf("config: assignedDeleteTimeout is not set in matchmaker_config_override.yaml")
}
if !cfg.IsSet("queryPageSize") {
log.Printf("config: queryPageSize is not set in matchmaker_config_override.yaml")
}
if !cfg.IsSet("backfillLockTimeout") {
log.Printf("config: backfillLockTimeout is not set in matchmaker_config_override.yaml")
}
// Look for updates to the config; in Kubernetes, this is implemented using
// a ConfigMap that is written to the matchmaker_config_override.yaml file, which is
// what the Open Match components using Viper monitor for changes.

@ -1,3 +1,4 @@
//go:build !race
// +build !race
// Copyright 2019 Google LLC

@ -20,10 +20,10 @@ package filter
import (
"time"
"github.com/golang/protobuf/ptypes"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"open-match.dev/open-match/pkg/pb"
)
@ -52,15 +52,17 @@ func NewPoolFilter(pool *pb.Pool) (*PoolFilter, error) {
var err error
if pool.GetCreatedBefore() != nil {
if cb, err = ptypes.Timestamp(pool.GetCreatedBefore()); err != nil {
if err = pool.GetCreatedBefore().CheckValid(); err != nil {
return nil, status.Error(codes.InvalidArgument, ".invalid created_before value")
}
cb = pool.GetCreatedBefore().AsTime()
}
if pool.GetCreatedAfter() != nil {
if ca, err = ptypes.Timestamp(pool.GetCreatedAfter()); err != nil {
if err = pool.GetCreatedAfter().CheckValid(); err != nil {
return nil, status.Error(codes.InvalidArgument, ".invalid created_after value")
}
ca = pool.GetCreatedAfter().AsTime()
}
return &PoolFilter{
@ -72,16 +74,24 @@ func NewPoolFilter(pool *pb.Pool) (*PoolFilter, error) {
}, nil
}
type filteredEntity interface {
GetId() string
GetSearchFields() *pb.SearchFields
GetCreateTime() *timestamppb.Timestamp
}
// In returns true if the Ticket meets all the criteria for this PoolFilter.
func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
s := ticket.GetSearchFields()
func (pf *PoolFilter) In(entity filteredEntity) bool {
s := entity.GetSearchFields()
if s == nil {
s = emptySearchFields
}
if !pf.CreatedAfter.IsZero() || !pf.CreatedBefore.IsZero() {
// CreateTime is only populated by Open Match and hence expected to be valid.
if ct, err := ptypes.Timestamp(ticket.CreateTime); err == nil {
if err := entity.GetCreateTime().CheckValid(); err == nil {
ct := entity.GetCreateTime().AsTime()
if !pf.CreatedAfter.IsZero() {
if !ct.After(pf.CreatedAfter) {
return false
@ -96,7 +106,7 @@ func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
} else {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": ticket.GetId(),
"id": entity.GetId(),
}).Error("failed to get time from Timestamp proto")
}
}
@ -106,10 +116,27 @@ func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
if !ok {
return false
}
// Not simplified so that NaN cases are handled correctly.
if !(v >= f.Min && v <= f.Max) {
return false
switch f.Exclude {
case pb.DoubleRangeFilter_NONE:
// Not simplified so that NaN cases are handled correctly.
if !(v >= f.Min && v <= f.Max) {
return false
}
case pb.DoubleRangeFilter_MIN:
if !(v > f.Min && v <= f.Max) {
return false
}
case pb.DoubleRangeFilter_MAX:
if !(v >= f.Min && v < f.Max) {
return false
}
case pb.DoubleRangeFilter_BOTH:
if !(v > f.Min && v < f.Max) {
return false
}
}
}
for _, f := range pf.StringEqualsFilters {

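The new Exclude field generalizes the double-range check from the always-inclusive [Min, Max] to any of four interval shapes. A hedged sketch of building a pool with exclusive bounds and testing a ticket against it — the mmr argument and its values are placeholders, and internal/filter is shown for illustration (outside this repo the filter runs server-side via the query service):

pool := &pb.Pool{
	Name: "mid-mmr",
	DoubleRangeFilters: []*pb.DoubleRangeFilter{{
		DoubleArg: "mmr",
		Min:       1000,
		Max:       2000,
		// BOTH makes the range exclusive at both ends: (1000, 2000).
		Exclude: pb.DoubleRangeFilter_BOTH,
	}},
}

pf, err := filter.NewPoolFilter(pool)
if err != nil {
	log.Fatal(err)
}
fmt.Println(pf.In(&pb.Ticket{
	CreateTime:   timestamppb.Now(),
	SearchFields: &pb.SearchFields{DoubleArgs: map[string]float64{"mmr": 1000}},
})) // false: 1000 sits on the excluded Min boundary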
@ -17,43 +17,62 @@ package filter
import (
"testing"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"open-match.dev/open-match/internal/filter/testcases"
"open-match.dev/open-match/pkg/pb"
)
func TestMeetsCriteria(t *testing.T) {
testInclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
pf, err := NewPoolFilter(pool)
require.NoError(t, err)
require.NotNil(t, pf)
if !pf.In(entity) {
t.Error("entity should be included in the pool")
}
}
for _, tc := range testcases.IncludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.Pool)
require.NoError(t, err)
require.NotNil(t, pf)
tc.Ticket.CreateTime = ptypes.TimestampNow()
if !pf.In(tc.Ticket) {
t.Error("ticket should be included in the pool")
}
testInclusion(t, tc.Pool, &pb.Ticket{
SearchFields: tc.SearchFields,
CreateTime: timestamppb.Now(),
})
testInclusion(t, tc.Pool, &pb.Backfill{
SearchFields: tc.SearchFields,
CreateTime: timestamppb.Now(),
})
})
}
testExclusion := func(t *testing.T, pool *pb.Pool, entity filteredEntity) {
pf, err := NewPoolFilter(pool)
require.NoError(t, err)
require.NotNil(t, pf)
if pf.In(entity) {
t.Error("ticket should be excluded from the pool")
}
}
for _, tc := range testcases.ExcludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.Pool)
require.NoError(t, err)
require.NotNil(t, pf)
tc.Ticket.CreateTime = ptypes.TimestampNow()
if pf.In(tc.Ticket) {
t.Error("ticket should be excluded from the pool")
}
testExclusion(t, tc.Pool, &pb.Ticket{
SearchFields: tc.SearchFields,
CreateTime: timestamppb.Now(),
})
testExclusion(t, tc.Pool, &pb.Backfill{
SearchFields: tc.SearchFields,
CreateTime: timestamppb.Now(),
})
})
}
}
@@ -68,7 +87,7 @@ func TestValidPoolFilter(t *testing.T) {
{
"invalid create before",
&pb.Pool{
CreatedBefore: &timestamp.Timestamp{Nanos: -1},
CreatedBefore: &timestamppb.Timestamp{Nanos: -1},
},
codes.InvalidArgument,
".invalid created_before value",
@@ -76,7 +95,7 @@ func TestValidPoolFilter(t *testing.T) {
{
"invalid create after",
&pb.Pool{
CreatedAfter: &timestamp.Timestamp{Nanos: -1},
CreatedAfter: &timestamppb.Timestamp{Nanos: -1},
},
codes.InvalidArgument,
".invalid created_after value",

@@ -20,16 +20,15 @@ import (
"math"
"time"
"github.com/golang/protobuf/ptypes"
tspb "github.com/golang/protobuf/ptypes/timestamp"
"google.golang.org/protobuf/types/known/timestamppb"
"open-match.dev/open-match/pkg/pb"
)
// TestCase defines a single filtering test case to run.
type TestCase struct {
Name string
Ticket *pb.Ticket
Pool *pb.Pool
Name string
SearchFields *pb.SearchFields
Pool *pb.Pool
}
// IncludedTestCases returns a list of test cases where using the given filter,
@@ -39,22 +38,38 @@ func IncludedTestCases() []TestCase {
return []TestCase{
{
"no filters or fields",
&pb.Ticket{},
nil,
&pb.Pool{},
},
simpleDoubleRange("simpleInRange", 5, 0, 10),
simpleDoubleRange("exactMatch", 5, 5, 5),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1)),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("simpleInRange", 5, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("excludeNone", 0, 0, 1, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("excludeNone", 1, 0, 1, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("excludeMin", 1, 0, 1, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("excludeMax", 0, 0, 1, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("excludeBoth", 2, 0, 3, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("excludeBoth", 1, 0, 3, pb.DoubleRangeFilter_BOTH),
{
"String equals simple positive",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
&pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
&pb.Pool{
@@ -69,11 +84,9 @@ func IncludedTestCases() []TestCase {
{
"TagPresent simple positive",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"mytag",
},
&pb.SearchFields{
Tags: []string{
"mytag",
},
},
&pb.Pool{
@@ -87,11 +100,9 @@ func IncludedTestCases() []TestCase {
{
"TagPresent multiple all present",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
&pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
&pb.Pool{
@@ -113,21 +124,21 @@ func IncludedTestCases() []TestCase {
{
"CreatedBefore simple positive",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
},
},
{
"CreatedAfter simple positive",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
},
},
{
"Between CreatedBefore and CreatedAfter positive",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
@@ -135,7 +146,7 @@ func IncludedTestCases() []TestCase {
},
{
"No time search criteria positive",
&pb.Ticket{},
nil,
&pb.Pool{},
},
}
@@ -148,7 +159,7 @@ func ExcludedTestCases() []TestCase {
return []TestCase{
{
"DoubleRange no SearchFields",
&pb.Ticket{},
nil,
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
@@ -161,7 +172,7 @@ func ExcludedTestCases() []TestCase {
},
{
"StringEquals no SearchFields",
&pb.Ticket{},
nil,
&pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
@@ -173,7 +184,7 @@ func ExcludedTestCases() []TestCase {
},
{
"TagPresent no SearchFields",
&pb.Ticket{},
nil,
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
@@ -182,14 +193,11 @@ func ExcludedTestCases() []TestCase {
},
},
},
{
"double range missing field",
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"otherfield": 0,
},
&pb.SearchFields{
DoubleArgs: map[string]float64{
"otherfield": 0,
},
},
&pb.Pool{
@@ -203,22 +211,66 @@ func ExcludedTestCases() []TestCase {
},
},
simpleDoubleRange("valueTooLow", -1, 0, 10),
simpleDoubleRange("valueTooHigh", 11, 0, 10),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN()),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN()),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1)),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN()),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("exactMatch", 5, 5, 5, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueTooLow", -1, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueTooHigh", 11, 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_NONE),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MIN),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_MAX),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN(), pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("valueIsMax", 1, 0, 1, pb.DoubleRangeFilter_MAX),
simpleDoubleRange("valueIsMin", 0, 0, 1, pb.DoubleRangeFilter_MIN),
simpleDoubleRange("excludeBoth", 0, 0, 1, pb.DoubleRangeFilter_BOTH),
simpleDoubleRange("excludeBoth", 1, 0, 1, pb.DoubleRangeFilter_BOTH),
{
"String equals simple negative", // and case sensitivity
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
&pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
&pb.Pool{
@@ -233,11 +285,9 @@ func ExcludedTestCases() []TestCase {
{
"String equals missing field",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"otherfield": "othervalue",
},
&pb.SearchFields{
StringArgs: map[string]string{
"otherfield": "othervalue",
},
},
&pb.Pool{
@@ -252,11 +302,9 @@ func ExcludedTestCases() []TestCase {
{
"TagPresent simple negative", // and case sensitivity
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"MYTAG",
},
&pb.SearchFields{
Tags: []string{
"MYTAG",
},
},
&pb.Pool{
@@ -270,11 +318,9 @@ func ExcludedTestCases() []TestCase {
{
"TagPresent multiple with one missing",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
&pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
&pb.Pool{
@@ -294,21 +340,21 @@ func ExcludedTestCases() []TestCase {
{
"CreatedBefore simple negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
},
},
{
"CreatedAfter simple negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
},
},
{
"Created before time range negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 2)),
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
@@ -316,7 +362,7 @@ func ExcludedTestCases() []TestCase {
},
{
"Created after time range negative",
&pb.Ticket{},
nil,
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
CreatedAfter: timestamp(now.Add(time.Hour * -2)),
@@ -329,14 +375,12 @@ func ExcludedTestCases() []TestCase {
}
}
func simpleDoubleRange(name string, value, min, max float64) TestCase {
func simpleDoubleRange(name string, value, min, max float64, exclude pb.DoubleRangeFilter_Exclude) TestCase {
return TestCase{
"double range " + name,
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"field": value,
},
&pb.SearchFields{
DoubleArgs: map[string]float64{
"field": value,
},
},
&pb.Pool{
@@ -345,6 +389,7 @@ func simpleDoubleRange(name string, value, min, max float64) TestCase {
DoubleArg: "field",
Min: min,
Max: max,
Exclude: exclude,
},
},
},
@@ -369,16 +414,14 @@ func multipleFilters(doubleRange, stringEquals, tagPresent bool) TestCase {
return TestCase{
fmt.Sprintf("multiplefilters: %v, %v, %v", doubleRange, stringEquals, tagPresent),
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"a": a,
},
StringArgs: map[string]string{
"b": b,
},
Tags: []string{c},
&pb.SearchFields{
DoubleArgs: map[string]float64{
"a": a,
},
StringArgs: map[string]string{
"b": b,
},
Tags: []string{c},
},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
@@ -403,9 +446,9 @@ func multipleFilters(doubleRange, stringEquals, tagPresent bool) TestCase {
}
}
func timestamp(t time.Time) *tspb.Timestamp {
tsp, err := ptypes.TimestampProto(t)
if err != nil {
func timestamp(t time.Time) *timestamppb.Timestamp {
tsp := timestamppb.New(t)
if err := tsp.CheckValid(); err != nil {
panic(err)
}

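The rewritten timestamp helper above doubles as the migration recipe from the deprecated github.com/golang/protobuf/ptypes calls used elsewhere in this diff: construction can no longer fail, so validation becomes an explicit CheckValid. A minimal sketch:

ts := timestamppb.New(time.Now()) // replaces ptypes.TimestampProto, which returned an error
if err := ts.CheckValid(); err != nil {
	panic(err) // the test helper panics; production code would propagate the error
}
_ = ts.AsTime() // replaces ptypes.Timestamp; always yields a UTC time.Time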
internal/ipb/messages.pb.go (new file, 177 lines)

@@ -0,0 +1,177 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v4.24.0
// source: internal/api/messages.proto
package ipb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
pb "open-match.dev/open-match/pkg/pb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type BackfillInternal struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Represents a backfill entity which is used to fill partially full matches
Backfill *pb.Backfill `protobuf:"bytes,1,opt,name=backfill,proto3" json:"backfill,omitempty"`
// List of ticket IDs associated with a current backfill
TicketIds []string `protobuf:"bytes,2,rep,name=ticket_ids,json=ticketIds,proto3" json:"ticket_ids,omitempty"`
}
func (x *BackfillInternal) Reset() {
*x = BackfillInternal{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_messages_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *BackfillInternal) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*BackfillInternal) ProtoMessage() {}
func (x *BackfillInternal) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_messages_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use BackfillInternal.ProtoReflect.Descriptor instead.
func (*BackfillInternal) Descriptor() ([]byte, []int) {
return file_internal_api_messages_proto_rawDescGZIP(), []int{0}
}
func (x *BackfillInternal) GetBackfill() *pb.Backfill {
if x != nil {
return x.Backfill
}
return nil
}
func (x *BackfillInternal) GetTicketIds() []string {
if x != nil {
return x.TicketIds
}
return nil
}
var File_internal_api_messages_proto protoreflect.FileDescriptor
var file_internal_api_messages_proto_rawDesc = []byte{
0x0a, 0x1b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x6f,
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x62, 0x0a, 0x10, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c,
0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x2f, 0x0a, 0x08, 0x62, 0x61, 0x63,
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70,
0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c,
0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69,
0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x73, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_internal_api_messages_proto_rawDescOnce sync.Once
file_internal_api_messages_proto_rawDescData = file_internal_api_messages_proto_rawDesc
)
func file_internal_api_messages_proto_rawDescGZIP() []byte {
file_internal_api_messages_proto_rawDescOnce.Do(func() {
file_internal_api_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_messages_proto_rawDescData)
})
return file_internal_api_messages_proto_rawDescData
}
var file_internal_api_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_internal_api_messages_proto_goTypes = []interface{}{
(*BackfillInternal)(nil), // 0: openmatch.internal.BackfillInternal
(*pb.Backfill)(nil), // 1: openmatch.Backfill
}
var file_internal_api_messages_proto_depIdxs = []int32{
1, // 0: openmatch.internal.BackfillInternal.backfill:type_name -> openmatch.Backfill
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_internal_api_messages_proto_init() }
func file_internal_api_messages_proto_init() {
if File_internal_api_messages_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_internal_api_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BackfillInternal); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_api_messages_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_internal_api_messages_proto_goTypes,
DependencyIndexes: file_internal_api_messages_proto_depIdxs,
MessageInfos: file_internal_api_messages_proto_msgTypes,
}.Build()
File_internal_api_messages_proto = out.File
file_internal_api_messages_proto_rawDesc = nil
file_internal_api_messages_proto_goTypes = nil
file_internal_api_messages_proto_depIdxs = nil
}

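The only message in this new file pairs a public Backfill with the tickets currently assigned to it. A hypothetical construction (IDs invented for illustration):

bi := &ipb.BackfillInternal{
	Backfill:  &pb.Backfill{Id: "backfill-1"},
	TicketIds: []string{"ticket-1", "ticket-2"},
}
// The generated getters are nil-safe: bi.GetBackfill().GetId() returns ""
// rather than panicking when the Backfill field is unset.
_ = bi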
@@ -1,71 +1,93 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.31.0
// protoc v4.24.0
// source: internal/api/synchronizer.proto
package ipb
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
pb "open-match.dev/open-match/pkg/pb"
reflect "reflect"
sync "sync"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SynchronizeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// A match returned by an mmf.
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Proposal *pb.Match `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"`
}
func (m *SynchronizeRequest) Reset() { *m = SynchronizeRequest{} }
func (m *SynchronizeRequest) String() string { return proto.CompactTextString(m) }
func (*SynchronizeRequest) ProtoMessage() {}
func (x *SynchronizeRequest) Reset() {
*x = SynchronizeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SynchronizeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SynchronizeRequest) ProtoMessage() {}
func (x *SynchronizeRequest) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_synchronizer_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SynchronizeRequest.ProtoReflect.Descriptor instead.
func (*SynchronizeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_35ff6b85fea1c4b7, []int{0}
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{0}
}
func (m *SynchronizeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SynchronizeRequest.Unmarshal(m, b)
}
func (m *SynchronizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SynchronizeRequest.Marshal(b, m, deterministic)
}
func (m *SynchronizeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SynchronizeRequest.Merge(m, src)
}
func (m *SynchronizeRequest) XXX_Size() int {
return xxx_messageInfo_SynchronizeRequest.Size(m)
}
func (m *SynchronizeRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SynchronizeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SynchronizeRequest proto.InternalMessageInfo
func (m *SynchronizeRequest) GetProposal() *pb.Match {
if m != nil {
return m.Proposal
func (x *SynchronizeRequest) GetProposal() *pb.Match {
if x != nil {
return x.Proposal
}
return nil
}
type SynchronizeResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Instructs the backend call that it can start running the mmfs.
StartMmfs bool `protobuf:"varint,1,opt,name=start_mmfs,json=startMmfs,proto3" json:"start_mmfs,omitempty"`
// Instructs the backend call that it should cancel any RPC calls to the mmfs,
@@ -73,198 +95,170 @@ type SynchronizeResponse struct {
CancelMmfs bool `protobuf:"varint,2,opt,name=cancel_mmfs,json=cancelMmfs,proto3" json:"cancel_mmfs,omitempty"`
// A match ID returned by the evaluator, which should be returned to the
// FetchMatches caller.
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
MatchId string `protobuf:"bytes,4,opt,name=match_id,json=matchId,proto3" json:"match_id,omitempty"`
}
func (m *SynchronizeResponse) Reset() { *m = SynchronizeResponse{} }
func (m *SynchronizeResponse) String() string { return proto.CompactTextString(m) }
func (*SynchronizeResponse) ProtoMessage() {}
func (x *SynchronizeResponse) Reset() {
*x = SynchronizeResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SynchronizeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SynchronizeResponse) ProtoMessage() {}
func (x *SynchronizeResponse) ProtoReflect() protoreflect.Message {
mi := &file_internal_api_synchronizer_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SynchronizeResponse.ProtoReflect.Descriptor instead.
func (*SynchronizeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_35ff6b85fea1c4b7, []int{1}
return file_internal_api_synchronizer_proto_rawDescGZIP(), []int{1}
}
func (m *SynchronizeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SynchronizeResponse.Unmarshal(m, b)
}
func (m *SynchronizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SynchronizeResponse.Marshal(b, m, deterministic)
}
func (m *SynchronizeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SynchronizeResponse.Merge(m, src)
}
func (m *SynchronizeResponse) XXX_Size() int {
return xxx_messageInfo_SynchronizeResponse.Size(m)
}
func (m *SynchronizeResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SynchronizeResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SynchronizeResponse proto.InternalMessageInfo
func (m *SynchronizeResponse) GetStartMmfs() bool {
if m != nil {
return m.StartMmfs
func (x *SynchronizeResponse) GetStartMmfs() bool {
if x != nil {
return x.StartMmfs
}
return false
}
func (m *SynchronizeResponse) GetCancelMmfs() bool {
if m != nil {
return m.CancelMmfs
func (x *SynchronizeResponse) GetCancelMmfs() bool {
if x != nil {
return x.CancelMmfs
}
return false
}
func (m *SynchronizeResponse) GetMatchId() string {
if m != nil {
return m.MatchId
func (x *SynchronizeResponse) GetMatchId() string {
if x != nil {
return x.MatchId
}
return ""
}
func init() {
proto.RegisterType((*SynchronizeRequest)(nil), "openmatch.internal.SynchronizeRequest")
proto.RegisterType((*SynchronizeResponse)(nil), "openmatch.internal.SynchronizeResponse")
var File_internal_api_synchronizer_proto protoreflect.FileDescriptor
var file_internal_api_synchronizer_proto_rawDesc = []byte{
0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73,
0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x1a, 0x12, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x12, 0x53, 0x79, 0x6e,
0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x2c, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61,
0x74, 0x63, 0x68, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x22, 0x76, 0x0a,
0x13, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6d, 0x6d,
0x66, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4d,
0x6d, 0x66, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x6d, 0x6d,
0x66, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c,
0x4d, 0x6d, 0x66, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x4a,
0x04, 0x08, 0x03, 0x10, 0x04, 0x32, 0x72, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
0x6e, 0x69, 0x7a, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f,
0x6e, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6f,
0x70, 0x65, 0x6e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x6f, 0x70, 0x65,
0x6e, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x69, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
func init() { proto.RegisterFile("internal/api/synchronizer.proto", fileDescriptor_35ff6b85fea1c4b7) }
var (
file_internal_api_synchronizer_proto_rawDescOnce sync.Once
file_internal_api_synchronizer_proto_rawDescData = file_internal_api_synchronizer_proto_rawDesc
)
var fileDescriptor_35ff6b85fea1c4b7 = []byte{
// 263 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
0x10, 0xc5, 0x89, 0x16, 0x4d, 0x27, 0x1e, 0xca, 0x7a, 0xa9, 0x05, 0x69, 0xe9, 0xa1, 0xe6, 0xa0,
0x1b, 0xa9, 0xdf, 0xa0, 0x37, 0x85, 0x5e, 0xe2, 0xcd, 0x4b, 0xd9, 0x24, 0x53, 0xbb, 0x90, 0xfd,
0xe3, 0xce, 0x5a, 0xd0, 0x4f, 0x2f, 0xd9, 0xc5, 0xa6, 0xd2, 0x83, 0x97, 0x85, 0x37, 0xf3, 0xdb,
0x37, 0xcc, 0x1b, 0x98, 0x4a, 0xed, 0xd1, 0x69, 0xd1, 0x16, 0xc2, 0xca, 0x82, 0xbe, 0x74, 0xbd,
0x73, 0x46, 0xcb, 0x6f, 0x74, 0xdc, 0x3a, 0xe3, 0x0d, 0x63, 0xc6, 0xa2, 0x56, 0xc2, 0xd7, 0x3b,
0xfe, 0x8b, 0x4e, 0x58, 0xc7, 0x2a, 0x24, 0x12, 0xef, 0x48, 0x91, 0x9b, 0xaf, 0x80, 0xbd, 0xf6,
0xbf, 0x4b, 0xfc, 0xf8, 0x44, 0xf2, 0xec, 0x1e, 0x52, 0xeb, 0x8c, 0x35, 0x24, 0xda, 0x71, 0x32,
0x4b, 0xf2, 0x6c, 0x39, 0xe2, 0xbd, 0xe1, 0xba, 0x7b, 0xcb, 0x03, 0x31, 0xdf, 0xc3, 0xf5, 0x1f,
0x0f, 0xb2, 0x46, 0x13, 0xb2, 0x5b, 0x00, 0xf2, 0xc2, 0xf9, 0x8d, 0x52, 0x5b, 0x0a, 0x36, 0x69,
0x39, 0x0c, 0x95, 0xb5, 0xda, 0x12, 0x9b, 0x42, 0x56, 0x0b, 0x5d, 0x63, 0x1b, 0xfb, 0x67, 0xa1,
0x0f, 0xb1, 0x14, 0x80, 0x1b, 0x48, 0xc3, 0xbc, 0x8d, 0x6c, 0xc6, 0x83, 0x59, 0x92, 0x0f, 0xcb,
0xcb, 0xa0, 0x9f, 0x9b, 0x97, 0x41, 0x7a, 0x3e, 0x1a, 0x2c, 0x1d, 0x5c, 0x1d, 0xcd, 0x75, 0xac,
0x82, 0xec, 0x48, 0xb3, 0x05, 0x3f, 0xcd, 0x80, 0x9f, 0x2e, 0x3b, 0xb9, 0xfb, 0x97, 0x8b, 0x0b,
0xe5, 0xc9, 0x63, 0xb2, 0xca, 0xdf, 0x16, 0x1d, 0xfd, 0x10, 0xf1, 0x06, 0xf7, 0x45, 0x2f, 0x8b,
0xc3, 0x51, 0xa4, 0xad, 0xaa, 0x8b, 0x10, 0xf0, 0xd3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84,
0xba, 0xf6, 0x41, 0xab, 0x01, 0x00, 0x00,
func file_internal_api_synchronizer_proto_rawDescGZIP() []byte {
file_internal_api_synchronizer_proto_rawDescOnce.Do(func() {
file_internal_api_synchronizer_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_api_synchronizer_proto_rawDescData)
})
return file_internal_api_synchronizer_proto_rawDescData
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// SynchronizerClient is the client API for Synchronizer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SynchronizerClient interface {
// Synchronize signals the caller when it is safe to run mmfs, collects the
// mmfs' proposals, and returns the evaluated matches.
Synchronize(ctx context.Context, opts ...grpc.CallOption) (Synchronizer_SynchronizeClient, error)
var file_internal_api_synchronizer_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_internal_api_synchronizer_proto_goTypes = []interface{}{
(*SynchronizeRequest)(nil), // 0: openmatch.internal.SynchronizeRequest
(*SynchronizeResponse)(nil), // 1: openmatch.internal.SynchronizeResponse
(*pb.Match)(nil), // 2: openmatch.Match
}
var file_internal_api_synchronizer_proto_depIdxs = []int32{
2, // 0: openmatch.internal.SynchronizeRequest.proposal:type_name -> openmatch.Match
0, // 1: openmatch.internal.Synchronizer.Synchronize:input_type -> openmatch.internal.SynchronizeRequest
1, // 2: openmatch.internal.Synchronizer.Synchronize:output_type -> openmatch.internal.SynchronizeResponse
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
type synchronizerClient struct {
cc *grpc.ClientConn
}
func NewSynchronizerClient(cc *grpc.ClientConn) SynchronizerClient {
return &synchronizerClient{cc}
}
func (c *synchronizerClient) Synchronize(ctx context.Context, opts ...grpc.CallOption) (Synchronizer_SynchronizeClient, error) {
stream, err := c.cc.NewStream(ctx, &_Synchronizer_serviceDesc.Streams[0], "/openmatch.internal.Synchronizer/Synchronize", opts...)
if err != nil {
return nil, err
func init() { file_internal_api_synchronizer_proto_init() }
func file_internal_api_synchronizer_proto_init() {
if File_internal_api_synchronizer_proto != nil {
return
}
x := &synchronizerSynchronizeClient{stream}
return x, nil
}
type Synchronizer_SynchronizeClient interface {
Send(*SynchronizeRequest) error
Recv() (*SynchronizeResponse, error)
grpc.ClientStream
}
type synchronizerSynchronizeClient struct {
grpc.ClientStream
}
func (x *synchronizerSynchronizeClient) Send(m *SynchronizeRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *synchronizerSynchronizeClient) Recv() (*SynchronizeResponse, error) {
m := new(SynchronizeResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
if !protoimpl.UnsafeEnabled {
file_internal_api_synchronizer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_api_synchronizer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
return m, nil
}
// SynchronizerServer is the server API for Synchronizer service.
type SynchronizerServer interface {
// Synchronize signals the caller when it is safe to run mmfs, collects the
// mmfs' proposals, and returns the evaluated matches.
Synchronize(Synchronizer_SynchronizeServer) error
}
// UnimplementedSynchronizerServer can be embedded to have forward compatible implementations.
type UnimplementedSynchronizerServer struct {
}
func (*UnimplementedSynchronizerServer) Synchronize(srv Synchronizer_SynchronizeServer) error {
return status.Errorf(codes.Unimplemented, "method Synchronize not implemented")
}
func RegisterSynchronizerServer(s *grpc.Server, srv SynchronizerServer) {
s.RegisterService(&_Synchronizer_serviceDesc, srv)
}
func _Synchronizer_Synchronize_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SynchronizerServer).Synchronize(&synchronizerSynchronizeServer{stream})
}
type Synchronizer_SynchronizeServer interface {
Send(*SynchronizeResponse) error
Recv() (*SynchronizeRequest, error)
grpc.ServerStream
}
type synchronizerSynchronizeServer struct {
grpc.ServerStream
}
func (x *synchronizerSynchronizeServer) Send(m *SynchronizeResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *synchronizerSynchronizeServer) Recv() (*SynchronizeRequest, error) {
m := new(SynchronizeRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Synchronizer_serviceDesc = grpc.ServiceDesc{
ServiceName: "openmatch.internal.Synchronizer",
HandlerType: (*SynchronizerServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Synchronize",
Handler: _Synchronizer_Synchronize_Handler,
ServerStreams: true,
ClientStreams: true,
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_api_synchronizer_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
},
Metadata: "internal/api/synchronizer.proto",
GoTypes: file_internal_api_synchronizer_proto_goTypes,
DependencyIndexes: file_internal_api_synchronizer_proto_depIdxs,
MessageInfos: file_internal_api_synchronizer_proto_msgTypes,
}.Build()
File_internal_api_synchronizer_proto = out.File
file_internal_api_synchronizer_proto_rawDesc = nil
file_internal_api_synchronizer_proto_goTypes = nil
file_internal_api_synchronizer_proto_depIdxs = nil
}

@@ -0,0 +1,157 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.24.0
// source: internal/api/synchronizer.proto
package ipb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const (
Synchronizer_Synchronize_FullMethodName = "/openmatch.internal.Synchronizer/Synchronize"
)
// SynchronizerClient is the client API for Synchronizer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SynchronizerClient interface {
// Synchronize signals the caller when it is safe to run mmfs, collects the
// mmfs' proposals, and returns the evaluated matches.
Synchronize(ctx context.Context, opts ...grpc.CallOption) (Synchronizer_SynchronizeClient, error)
}
type synchronizerClient struct {
cc grpc.ClientConnInterface
}
func NewSynchronizerClient(cc grpc.ClientConnInterface) SynchronizerClient {
return &synchronizerClient{cc}
}
func (c *synchronizerClient) Synchronize(ctx context.Context, opts ...grpc.CallOption) (Synchronizer_SynchronizeClient, error) {
stream, err := c.cc.NewStream(ctx, &Synchronizer_ServiceDesc.Streams[0], Synchronizer_Synchronize_FullMethodName, opts...)
if err != nil {
return nil, err
}
x := &synchronizerSynchronizeClient{stream}
return x, nil
}
type Synchronizer_SynchronizeClient interface {
Send(*SynchronizeRequest) error
Recv() (*SynchronizeResponse, error)
grpc.ClientStream
}
type synchronizerSynchronizeClient struct {
grpc.ClientStream
}
func (x *synchronizerSynchronizeClient) Send(m *SynchronizeRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *synchronizerSynchronizeClient) Recv() (*SynchronizeResponse, error) {
m := new(SynchronizeResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// SynchronizerServer is the server API for Synchronizer service.
// All implementations should embed UnimplementedSynchronizerServer
// for forward compatibility
type SynchronizerServer interface {
// Synchronize signals the caller when it is safe to run mmfs, collects the
// mmfs' proposals, and returns the evaluated matches.
Synchronize(Synchronizer_SynchronizeServer) error
}
// UnimplementedSynchronizerServer should be embedded to have forward compatible implementations.
type UnimplementedSynchronizerServer struct {
}
func (UnimplementedSynchronizerServer) Synchronize(Synchronizer_SynchronizeServer) error {
return status.Errorf(codes.Unimplemented, "method Synchronize not implemented")
}
// UnsafeSynchronizerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SynchronizerServer will
// result in compilation errors.
type UnsafeSynchronizerServer interface {
mustEmbedUnimplementedSynchronizerServer()
}
func RegisterSynchronizerServer(s grpc.ServiceRegistrar, srv SynchronizerServer) {
s.RegisterService(&Synchronizer_ServiceDesc, srv)
}
func _Synchronizer_Synchronize_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SynchronizerServer).Synchronize(&synchronizerSynchronizeServer{stream})
}
type Synchronizer_SynchronizeServer interface {
Send(*SynchronizeResponse) error
Recv() (*SynchronizeRequest, error)
grpc.ServerStream
}
type synchronizerSynchronizeServer struct {
grpc.ServerStream
}
func (x *synchronizerSynchronizeServer) Send(m *SynchronizeResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *synchronizerSynchronizeServer) Recv() (*SynchronizeRequest, error) {
m := new(SynchronizeRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Synchronizer_ServiceDesc is the grpc.ServiceDesc for Synchronizer service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Synchronizer_ServiceDesc = grpc.ServiceDesc{
ServiceName: "openmatch.internal.Synchronizer",
HandlerType: (*SynchronizerServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Synchronize",
Handler: _Synchronizer_Synchronize_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "internal/api/synchronizer.proto",
}

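Together with the message definitions above, the regenerated service code describes one bidirectional stream. A hedged sketch of driving it from the backend side (the function and variable names are invented for this note):

func synchronizeOne(ctx context.Context, conn *grpc.ClientConn, proposal *pb.Match) (string, error) {
	stream, err := ipb.NewSynchronizerClient(conn).Synchronize(ctx)
	if err != nil {
		return "", err
	}
	// Send one mmf proposal, then read back the synchronizer's instruction.
	if err := stream.Send(&ipb.SynchronizeRequest{Proposal: proposal}); err != nil {
		return "", err
	}
	resp, err := stream.Recv()
	if err != nil {
		return "", err
	}
	// MatchId names an evaluated match to hand back to the FetchMatches caller;
	// StartMmfs and CancelMmfs tell the backend how to drive the mmfs.
	return resp.GetMatchId(), nil
}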
@@ -24,8 +24,8 @@ import (
)
// ConfigureLogging sets up open match logrus instance using the logging section of the matchmaker_config.json
// - log line format (text[default] or json)
// - min log level to include (debug, info [default], warn, error, fatal, panic)
func ConfigureLogging(cfg config.View) {
logrus.SetFormatter(newFormatter(cfg.GetString("logging.format")))
level := toLevel(cfg.GetString("logging.level"))

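The hunk above is only a gofmt re-indentation of the doc-comment list, but it documents the two logging knobs. A sketch of exercising them, assuming a *viper.Viper satisfies config.View as it does in the rpc tests below:

cfg := viper.New()
cfg.Set("logging.format", "json")  // "text" (default) or "json"
cfg.Set("logging.level", "debug")  // debug, info (default), warn, error, fatal, panic
logging.ConfigureLogging(cfg)      // open-match.dev/open-match/internal/logging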
@@ -37,6 +37,7 @@ import (
"go.opencensus.io/plugin/ochttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
"open-match.dev/open-match/internal/config"
@@ -134,7 +135,7 @@ func GRPCClientFromEndpoint(cfg config.View, address string) (*grpc.ClientConn,
grpcOptions = append(grpcOptions, grpc.WithTransportCredentials(tc))
} else {
grpcOptions = append(grpcOptions, grpc.WithInsecure())
grpcOptions = append(grpcOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
return grpc.Dial(address, grpcOptions...)
@@ -152,7 +153,7 @@ func GRPCClientFromParams(params *ClientParams) (*grpc.ClientConn, error) {
}
grpcOptions = append(grpcOptions, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(trustedCertPool, "")))
} else {
grpcOptions = append(grpcOptions, grpc.WithInsecure())
grpcOptions = append(grpcOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
return grpc.Dial(params.Address, grpcOptions...)

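This is the mechanical substitution for the deprecated grpc.WithInsecure() applied throughout the diff; dialing behavior is unchanged, the intent is just expressed through the credentials API:

// Before (deprecated):
//   conn, err := grpc.Dial(address, grpc.WithInsecure())
// After, with "google.golang.org/grpc/credentials/insecure" imported:
conn, err := grpc.Dial(address, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
	return nil, err
}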
@@ -22,39 +22,51 @@ import (
"os"
"testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/telemetry"
shellTesting "open-match.dev/open-match/internal/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
shellTesting "open-match.dev/open-match/testing"
certgenTesting "open-match.dev/open-match/tools/certgen/testing"
)
func TestSecureGRPCFromConfig(t *testing.T) {
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
defer closer()
runGrpcClientTests(t, require, cfg, rpcParams)
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
}
func TestInsecureGRPCFromConfig(t *testing.T) {
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
defer closer()
runGrpcClientTests(t, require, cfg, rpcParams)
runSuccessGrpcClientTests(t, require, cfg, rpcParams)
}
func TestUnavailableGRPCFromConfig(t *testing.T) {
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "badhost")
defer closer()
runFailureGrpcClientTests(t, require, cfg, rpcParams, codes.Unavailable)
}
func TestHTTPSFromConfig(t *testing.T) {
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, true, "localhost")
defer closer()
runHTTPClientTests(require, cfg, rpcParams)
@@ -63,7 +75,7 @@ func TestHTTPSFromConfig(t *testing.T) {
func TestInsecureHTTPFromConfig(t *testing.T) {
require := require.New(t)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false)
cfg, rpcParams, closer := configureConfigAndKeysForTesting(t, require, false, "localhost")
defer closer()
runHTTPClientTests(require, cfg, rpcParams)
@@ -96,7 +108,7 @@ func TestSanitizeHTTPAddress(t *testing.T) {
}
}
func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
func setupClientConnection(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) *grpc.ClientConn {
// Serve a fake frontend server and wait for it to start up fully
ff := &shellTesting.FakeFrontend{}
rpcParams.AddHandleFunc(func(s *grpc.Server) {
@@ -104,7 +116,9 @@ func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.Vi
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
s := &Server{}
defer s.Stop()
t.Cleanup(func() {
defer s.Stop()
})
err := s.Start(rpcParams)
require.Nil(err)
@@ -112,6 +126,11 @@ func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.Vi
grpcConn, err := GRPCClientFromConfig(cfg, "test")
require.Nil(err)
require.NotNil(grpcConn)
return grpcConn
}
func runSuccessGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
// Confirm the client works as expected
ctx := utilTesting.NewContext(t)
@@ -121,6 +140,20 @@ func runGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.Vi
require.NotNil(grpcResp)
}
func runFailureGrpcClientTests(t *testing.T, require *require.Assertions, cfg config.View, rpcParams *ServerParams, expectedCode codes.Code) {
grpcConn := setupClientConnection(t, require, cfg, rpcParams)
// Confirm the client fails with the expected status code
ctx := utilTesting.NewContext(t)
feClient := pb.NewFrontendServiceClient(grpcConn)
grpcResp, err := feClient.CreateTicket(ctx, &pb.CreateTicketRequest{})
require.Error(err)
require.Nil(grpcResp)
code := status.Code(err)
require.Equal(expectedCode, code)
}
func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams *ServerParams) {
// Serve a fake frontend server and wait for it to start up fully
ff := &shellTesting.FakeFrontend{}
@@ -157,7 +190,7 @@ func runHTTPClientTests(require *require.Assertions, cfg config.View, rpcParams
}
// Generate a config view and, optionally, TLS key manifests for testing
func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions, tlsEnabled bool) (config.View, *ServerParams, func()) {
func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions, tlsEnabled bool, host string) (config.View, *ServerParams, func()) {
// Create netlisteners on random ports used for rpc serving
grpcL := MustListen()
httpL := MustListen()
@@ -165,7 +198,7 @@ func configureConfigAndKeysForTesting(t *testing.T, require *require.Assertions,
// Generate a config view with paths to the manifests
cfg := viper.New()
cfg.Set("test.hostname", "localhost")
cfg.Set("test.hostname", host)
cfg.Set("test.grpcport", MustGetPortNumber(grpcL))
cfg.Set("test.httpport", MustGetPortNumber(httpL))

@@ -19,9 +19,11 @@ import (
"net"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/protobuf/encoding/protojson"
"open-match.dev/open-match/internal/telemetry"
)
@@ -37,7 +39,19 @@ type insecureServer struct {
func (s *insecureServer) start(params *ServerParams) error {
s.httpMux = params.ServeMux
s.proxyMux = runtime.NewServeMux()
s.proxyMux = runtime.NewServeMux(
runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
Marshaler: &runtime.JSONPb{
MarshalOptions: protojson.MarshalOptions{
UseProtoNames: true,
EmitUnpopulated: false,
},
UnmarshalOptions: protojson.UnmarshalOptions{
DiscardUnknown: true,
},
},
}),
)
// Configure the gRPC server.
s.grpcServer = grpc.NewServer(newGRPCServerOptions(params)...)
@@ -60,7 +74,7 @@ func (s *insecureServer) start(params *ServerParams) error {
for _, handlerFunc := range params.handlersForGrpcProxy {
dialOpts := newGRPCDialOptions(params.enableMetrics, params.enableRPCLogging, params.enableRPCPayloadLogging)
dialOpts = append(dialOpts, grpc.WithInsecure())
dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err := handlerFunc(ctx, s.proxyMux, s.grpcListener.Addr().String(), dialOpts); err != nil {
cancel()
return errors.WithStack(err)

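For reference, the three marshaler options wired in above shape the gateway's JSON in both directions. An illustration with a pb.Ticket (values invented; protojson may inject benign whitespace into its output):

m := protojson.MarshalOptions{UseProtoNames: true, EmitUnpopulated: false}
b, err := m.Marshal(&pb.Ticket{Id: "t1", CreateTime: timestamppb.Now()})
if err != nil {
	panic(err)
}
fmt.Println(string(b)) // {"id":"t1","create_time":"..."}: proto names, unset fields omitted
// On requests, UnmarshalOptions{DiscardUnknown: true} makes the gateway skip
// unknown JSON keys instead of rejecting the body.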
@@ -24,7 +24,8 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
shellTesting "open-match.dev/open-match/internal/testing"
"google.golang.org/grpc/credentials/insecure"
shellTesting "open-match.dev/open-match/testing"
)
func TestInsecureStartStop(t *testing.T) {
@@ -42,7 +43,7 @@ func TestInsecureStartStop(t *testing.T) {
err := s.start(params)
require.Nil(err)
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithTransportCredentials(insecure.NewCredentials()))
require.Nil(err)
defer conn.Close()

@@ -28,7 +28,7 @@ import (
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpc_tracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpc_validator "github.com/grpc-ecosystem/go-grpc-middleware/validator"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/plugin/ocgrpc"

@@ -24,10 +24,11 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"open-match.dev/open-match/internal/telemetry"
shellTesting "open-match.dev/open-match/internal/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
shellTesting "open-match.dev/open-match/testing"
)
func TestStartStopServer(t *testing.T) {
@@ -46,7 +47,7 @@ func TestStartStopServer(t *testing.T) {
err := s.Start(params)
require.Nil(err)
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithTransportCredentials(insecure.NewCredentials()))
require.Nil(err)
endpoint := fmt.Sprintf("http://localhost:%s", MustGetPortNumber(httpL))

@@ -21,10 +21,11 @@ import (
"net"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/protobuf/encoding/protojson"
"open-match.dev/open-match/internal/telemetry"
)
@@ -45,7 +46,19 @@ func (s *tlsServer) start(params *ServerParams) error {
func (s *tlsServer) start(params *ServerParams) error {
s.httpMux = params.ServeMux
s.proxyMux = runtime.NewServeMux()
s.proxyMux = runtime.NewServeMux(
runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
Marshaler: &runtime.JSONPb{
MarshalOptions: protojson.MarshalOptions{
UseProtoNames: true,
EmitUnpopulated: false,
},
UnmarshalOptions: protojson.UnmarshalOptions{
DiscardUnknown: true,
},
},
}),
)
_, grpcPort, err := net.SplitHostPort(s.grpcListener.Addr().String())
if err != nil {

@@ -25,8 +25,8 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
shellTesting "open-match.dev/open-match/internal/testing"
"open-match.dev/open-match/pkg/pb"
shellTesting "open-match.dev/open-match/testing"
certgenTesting "open-match.dev/open-match/tools/certgen/testing"
)
