Compare commits


137 Commits

Author SHA1 Message Date
82d034f8e4 Fix dependency issues in the build. 2019-04-01 11:05:57 -07:00
97eed146da update protoc version to 3.7.1
This fixes the bug outlined here https://github.com/protocolbuffers/protobuf/issues/5875
2019-04-01 09:49:19 -07:00
6dd23ff6ad Merge pull request #135 from jeremyje/master
Merge 040wip into master.
2019-03-29 14:29:22 -07:00
03c7db7680 Merge 040wip 2019-03-28 11:12:07 -07:00
e5538401f6 Update protobuf definitions 2019-03-26 17:45:52 -07:00
eaa811f9ac Add example helm chart, replace example dashboard. 2019-03-26 17:45:28 -07:00
3b1c6b9141 Merge 2019-03-26 15:26:17 -07:00
34f9eb9bd3 Building again 2019-03-26 12:31:19 -07:00
3ad7f75fb4 Attempt to fix the build 2019-03-26 12:31:19 -07:00
78bd48118d Tweaks 2019-03-26 12:31:19 -07:00
3e71894111 Merge 2019-03-26 12:31:19 -07:00
36decb4068 Merge 2019-03-26 12:31:19 -07:00
f79b782a3a Go Modules 2019-03-26 11:14:48 -07:00
db186e55ff Move Dockerfiles to build C#, Golang, PHP, and Python3 MMFs. 2019-03-26 09:54:10 -07:00
957465ce51 Remove dead code that was moved to internal/app/mmlogicapi/apisrv/ 2019-03-25 16:14:25 -07:00
478eb61589 Delete unnecessary copy of protos in frontendclient. 2019-03-25 16:13:56 -07:00
6d2a5b743b Remove executable bit from files that are not executable. 2019-03-13 09:31:24 -07:00
9c943d5a10 Fix comment 2019-03-12 22:04:42 -07:00
8293d44ee0 Fix typos in comments, set and playerindices 2019-03-12 22:04:42 -07:00
a3bd862e76 store optional Redis password inside the Secret 2019-03-12 21:52:59 -07:00
c424d5eac9 Update .gcloudignore to include .gitignore's filters so that Cloud Build packages don't upload binaries. 2019-03-11 16:29:50 +09:00
2e6f5173e0 Add Prometheus service discovery annotations to the Open Match servers. 2019-03-11 16:25:21 +09:00
ee4bba44ec Makefile for simpler development 2019-03-11 16:14:00 +09:00
8e923a4328 Use grpc error codes for responses. 2019-03-11 16:13:06 +09:00
52efa04ee6 Add RPC dashboard and instructions to add more dashboards. 2019-03-07 10:58:53 -08:00
67d4965648 Helm charts for open-match, prometheus, and grafana 2019-03-06 17:09:09 -08:00
7a7b1cb305 Open Match CI support via Cloud Build 2019-03-04 09:41:19 -08:00
377a9621ff Improve error handling of Redis open connection failures. 2019-02-27 19:35:23 -08:00
432dd5a504 Consolidate Ctrl+Break handling into its own go package. 2019-02-27 17:52:58 +01:00
7446f5b1eb Move out Ctrl+Break wait signal to its own package. 2019-02-27 17:52:58 +01:00
15ea999628 Remove init() methods from OM servers since they aren't needed. 2019-02-27 08:58:39 +01:00
b5367ea3aa Add config/ in the search path for configuration so that PWD/config can be used as a ConfigMap mount path. 2019-02-25 16:49:35 -08:00
e022c02cb6 golang mmf serving harness 2019-02-25 04:54:02 -05:00
a13455d5b0 Move application logic from cmd/ to internal/app/ 2019-02-24 13:56:48 +01:00
16741409e7 Cleaner builds using svn for github 2019-02-19 09:24:50 -05:00
d7e8f8b3fa Testing 2019-02-19 07:30:26 -05:00
8c97c8f141 Testing2 2019-02-19 07:26:11 -05:00
6a8755a13d Testing 2019-02-19 07:24:10 -05:00
4ed6d275a3 remove player from ignorelists on frontend.DeletePlayer call 2019-02-19 20:01:29 +09:00
cb49eb8646 Merge remote-tracking branch 'origin/calebatwd/knative-rest-mmf' into 040wip 2019-02-16 04:01:01 -05:00
a7458dabf2 Fix test/example paths 2019-02-14 10:56:33 +09:00
5856b7d873 Merge branch '040wip' of https://github.com/GoogleCloudPlatform/open-match into 040wip 2019-02-11 01:23:06 -05:00
7733824c21 Remove matchmaking config file from base image 2019-02-11 01:22:23 -05:00
f1d261044b Add function port to config 2019-02-11 01:21:28 -05:00
95820431ab Update dev instructions 2019-02-11 01:20:55 -05:00
0002ecbdb2 Review feedback. 2019-02-09 15:28:48 +09:00
2eb51b5270 Fix build and test breakages 2019-02-09 15:28:48 +09:00
1847f79571 Convert JSON k8s deployment configs to YAML. 2019-02-09 15:17:22 +09:00
58ff12f3f8 Add stackdriver format support via TV4/logrus-stackdriver-formatter. Simply set format in config to stackdriver 2019-02-09 15:14:00 +09:00
b0b7b4bd15 Update git ignore to ignore goland ide files 2019-02-09 15:09:00 +09:00
f3f1f36099 Comment typo 2019-02-08 14:21:36 -08:00
f8cfb1b90f Add rest call support to job scheduling. This is a prototype implementation to support knative experimentation. 2019-02-08 14:20:29 -08:00
393e1d6de2 added configurable backoff to MatchObject and Player watchers 2019-02-08 16:19:52 +09:00
a11556433b Merge branch 'master' into 040wip 2019-02-08 01:48:54 -05:00
3ee9c05db7 Merge upstream changes 2019-02-08 01:47:43 -05:00
de7ba2db6b added demo attr to player indices 2019-02-03 20:17:13 -08:00
8393454158 fixes for configmap 2019-02-03 20:17:13 -08:00
6b93ac7663 configmap for matchmaker config 2019-02-03 20:17:13 -08:00
fe2410e9d8 PHP MMF: move cfg values to env vars 2019-02-03 20:17:13 -08:00
d8ecf1c439 doc update 2019-02-03 20:17:13 -08:00
8577f6bd4d Move cfg values to env vars for MMFs 2019-02-03 20:17:13 -08:00
470be06d16 fixed set.Difference() 2019-01-29 22:38:18 -08:00
c6e4dae79b fix google cloud knative url 2019-01-25 11:38:46 -08:00
23f83eddd1 mmlogic GetPlayerPool bugfix 2019-01-23 19:57:36 -05:00
dd794fd004 py3 mmf empty pools bugfix 2019-01-23 19:57:16 -05:00
f234433e33 write to error if all pools are empty in py3 mmf 2019-01-23 19:57:16 -05:00
d52773543d check for empty pools in py3 mmf 2019-01-23 19:57:16 -05:00
bd4ab0b530 mmlogic GetPlayerPool bugfix 2019-01-23 14:18:00 +03:00
6b9cd11be3 fix py3 mmf 2019-01-16 18:01:10 +03:00
1443bd1e80 PHP MMF: move cfg values to env vars 2019-01-16 13:41:44 +03:00
3fd8081dc5 doc update 2019-01-15 11:58:42 -05:00
dda949a6a4 Move cfg values to env vars for MMFs 2019-01-15 11:25:02 -05:00
128f0a2941 Merge branch 'master' of https://github.com/GoogleCloudPlatform/open-match 2019-01-15 09:42:01 -05:00
5f8a57398a Fix cloud build issue caused by 5f827b5c7c81c79ef9341cbebb51880f74b78a35 2019-01-15 09:41:38 -05:00
327d64611b This time with working hyperlink 2019-01-14 23:44:10 +09:00
5b4cdce610 Bump version number 2019-01-14 23:43:11 +09:00
56e08e82d4 Revert accidental file type change 2019-01-14 09:32:13 -05:00
2df027c9f6 Bold release numbers 2019-01-10 00:28:31 -05:00
913af84931 Use public repo URL 2019-01-09 02:18:53 -05:00
de6064f9fd Use public repo URL 2019-01-09 02:18:22 -05:00
867c55a409 Fix registry URL and add symlink issue 2019-01-09 02:15:11 -05:00
36420be2ce Revert accidental removal of symlink 2019-01-09 02:14:32 -05:00
16e9dda64a Bugfix for no commandline args 2019-01-09 02:14:07 -05:00
1ef9a896bf Revert accidental commit of empty file 2019-01-09 02:13:30 -05:00
75f2b84ded Up default timeout 2019-01-09 02:03:47 -05:00
2268baf1ba revert accidental commit of local change 2019-01-09 02:00:36 -05:00
9e43d989ea Remove debug sleep command 2019-01-09 00:10:47 -05:00
869725baee Bump k8s version 2019-01-08 23:56:07 -05:00
ae26ac3cd3 Merge remote-tracking branch 'origin/master' into 030wip 2019-01-08 23:41:55 -05:00
826af77396 Point to public registry and update tag 2019-01-08 23:37:38 -05:00
294d03e18b Roadmap 2019-01-08 22:39:08 -05:00
b27116aedd 030 RC2 2019-01-08 02:19:53 -05:00
074c0584f5 030 RC1 issue thread updates https://github.com/GoogleCloudPlatform/open-match/pull/55 2019-01-07 23:35:42 -05:00
210e00703a production guide now has placeholder notes, low hanging fruit 2019-01-07 23:35:14 -05:00
3ffbddbdd8 Updates to add optional TTL to redis objects 2019-01-05 23:37:38 -05:00
5f827b5c7c doesn't work 2019-01-05 23:01:33 -05:00
a161e6dba9 030 WIP first pass 2018-12-30 05:31:49 -05:00
7e70683d9b fix broken sed command 2018-12-30 04:34:27 -05:00
38bd94c078 Merge NoFr1ends commit 6a5dc1c 2018-12-30 04:16:48 -05:00
83366498d3 Update Docs 2018-12-30 03:45:39 -05:00
929e089e4d rename api call 2018-12-30 03:35:25 -05:00
a6b56b19d2 Merge branch to address issue #42 2018-12-28 04:01:59 -05:00
c2b6fdc198 Updates to FEClient and protos 2018-12-28 02:48:03 -05:00
43a4f046f0 Update config 2018-12-27 03:14:40 -05:00
b79bc2591c Remove references to connstring 2018-12-27 03:07:26 -05:00
61198fd168 No unused code 2018-12-27 03:04:18 -05:00
c1dd3835fe Updated logging 2018-12-27 02:55:16 -05:00
f3c9e87653 updates to documentation and builds 2018-12-27 02:28:43 -05:00
0064116c34 Further deletion and fix indexing for empty fields 2018-12-27 02:09:20 -05:00
298fe18f29 Updates to player deletion logic, metadata indices 2018-12-27 01:27:39 -05:00
6c539ab2a4 Remove manual filenames in logs 2018-12-26 07:43:54 -05:00
b6c59a7a0a Player watcher for FEAPI brought over from Doodle 2018-12-26 07:29:28 -05:00
f0536cedde Merge Ilya's updates 2018-12-26 00:18:00 -05:00
48fa4ba962 Update Redis HA details 2018-12-25 23:58:54 -05:00
39ff99b65e rename 'redis-sentinel' to just 'redis' 2018-12-26 13:51:24 +09:00
78c7b3b949 redis failover deployment 2018-12-26 13:51:24 +09:00
6a5dc1c508 Fix typo in development guide 2018-12-26 13:49:54 +09:00
9f84ec9bc9 First pass. Works but hacky. 2018-12-25 23:47:30 -05:00
e48b7db56f #51 Fix parsing of empty matchobject fields 2018-12-26 13:45:40 +09:00
bffd54727c Merge branch 'udptest' into test_agones 2018-12-19 02:59:04 -05:00
ab90f5f6e0 got udp test working 2018-12-19 02:56:20 -05:00
632415c746 simple udp client & server to integrate with agones 2018-12-18 23:58:02 +03:00
0882c63eb1 Update messages; more redis code sequestered to redis module 2018-12-16 08:12:42 -05:00
ee6716c60e Merge PL 47 2018-12-15 23:56:35 -05:00
bb5ad8a596 Merge 951bc8509d5eb8fceb138135c001c6a7b7f9bb25 into 275fa2d125e91fd25981124387f6388431f73874 2018-12-15 19:32:28 +00:00
951bc8509d Remove strings import as it's no longer used 2018-12-15 14:11:31 -05:00
ab8cd21633 Update to use Xid instead of UUID. 2018-12-15 14:11:05 -05:00
721cd2f7ae Still needs make file or the like and updated instructions 2018-12-10 14:05:00 +09:00
13cd1da631 Merge remote-tracking branch 'origin/json-logging' into feupdate 2018-12-06 23:28:35 -05:00
275fa2d125 Awkward wording 2018-12-07 13:17:39 +09:00
486c64798b Merge tag '020rc2' into feupdate 2018-12-06 02:14:58 -05:00
52f9e2810f WIP indexing 2018-11-28 04:10:08 -05:00
db60d7ac5f Merge from 0.2.0 2018-11-28 02:23:26 -05:00
3fcedbf13b Remove enum status states. No justification yet. 2018-11-26 17:42:08 -08:00
274edaae2e Grpc code for calling functions in mmforc 2018-11-26 17:40:25 -08:00
8ed865d300 Initial function messages plus protoc regen 2018-11-26 17:05:42 -08:00
326dd6c6dd Add logging config to support json and level selection for logrus 2018-11-17 16:11:33 -08:00
8414 changed files with 344612 additions and 5042 deletions

.dockerignore (new file)

@@ -0,0 +1,11 @@
# Compiled Binaries
cmd/backendapi/backendapi
cmd/frontendapi/frontendapi
cmd/mmforc/mmforc
cmd/mmlogicapi/mmlogicapi
examples/backendclient/backendclient
examples/evaluators/golang/simple/simple
examples/functions/golang/manual-simple/manual-simple
test/cmd/clientloadgen/clientloadgen
test/cmd/frontendclient/frontendclient
build/

.gcloudignore (new file)

@@ -0,0 +1,15 @@
# This file specifies files that are *not* uploaded to Google Cloud Platform
# using gcloud. It follows the same syntax as .gitignore, with the addition of
# "#!include" directives (which insert the entries of the given .gitignore-style
# file at that point).
#
# For more information, run:
# $ gcloud topic gcloudignore
#
.gcloudignore
# If you would like to upload your .git directory, .gitignore file or files
# from your .gitignore file, remove the corresponding line
# below:
.git
.gitignore
#!include:.gitignore

.gitignore

@@ -24,9 +24,13 @@
*.cities
populations
# local config files
#*.json
# Discarded code snippets
build.sh
*-fast.yaml
detritus/
# Dotnet Core ignores
*.swp
@@ -63,3 +67,26 @@ msbuild.wrn
# Visual Studio 2015
.vs/
# Goland
.idea/
# Nodejs files placed when building Hugo, ok to allow if we actually start using Nodejs.
package.json
package-lock.json
site/resources/_gen/
# Node Modules
node_modules/
# Compiled Binaries
cmd/backendapi/backendapi
cmd/frontendapi/frontendapi
cmd/mmforc/mmforc
cmd/mmlogicapi/mmlogicapi
examples/backendclient/backendclient
examples/evaluators/golang/simple/simple
examples/functions/golang/manual-simple/manual-simple
test/cmd/clientloadgen/clientloadgen
test/cmd/frontendclient/frontendclient


@@ -1,20 +1,48 @@
# Release history
## v0.4.0 (alpha)
### Release notes
- Thanks to completion of Issues [#42](issues/42) and [#45](issues/45), there is no longer a need to use the `openmatch-base` image when building components of Open Match. Each standalone application is now self-contained in its `Dockerfile` and `cloudbuild.yaml` files, and builds have been substantially simplified. **Note**: The default `Dockerfile` and `cloudbuild.yaml` now tag their images with the version number, not `dev`, and the YAML files in the `install` directory now reflect this.
- This paves the way for CI/CD in an upcoming version.
- This paves the way for public images in an upcoming version!
## v0.3.0 (alpha)
This update is focused on the Frontend API and Player Records, including more robust code for indexing, deindexing, reading, writing, and expiring player requests from Open Match state storage. All Frontend API function arguments have changed, although many only slightly. Please join the [Slack channel](https://open-match.slack.com/) if you need help ([Signup link](https://join.slack.com/t/open-match/shared_invite/enQtNDM1NjcxNTY4MTgzLWQzMzE1MGY5YmYyYWY3ZjE2MjNjZTdmYmQ1ZTQzMmNiNGViYmQyN2M4ZmVkMDY2YzZlOTUwMTYwMzI1Y2I2MjU))!
### Release notes
- The Frontend API calls have all been changed to reflect the fact that they operate on Players in state storage. To queue a game client, call 'CreatePlayer' in Open Match; to get updates, 'GetUpdates'; and to stop matching, 'DeletePlayer'. The calls are now much more obviously related to how Open Match sees players: they are database records that it creates on demand, updates using MMFs and the Backend API, and deletes when the player is no longer looking for a match.
- The Player record in state storage has changed to a more complete hash format, and it no longer makes sense to remove a player's assignment from the Frontend as a separate action from removing their record entirely. `DeleteAssignment()` has therefore been removed. Just use `DeletePlayer` instead; you'll always want the client to re-request matching with its latest attributes anyway.
- There is now a module for [indexing and deindexing players in state storage](internal/statestorage/redis/playerindices/playerindices.go). This is *much* more efficient, as well as cleaner and more maintainable, than the previous implementation, which was **hard-coded to index everything** you passed in to the Frontend API at a specific JSON object depth.
- This paves the way for dynamically choosing your indices without restarting the matchmaker. This will be implemented if there is demand. Pull Requests are welcome!
- Two internal timestamp-based indices have replaced the previous `timestamp` index. `created` is used to calculate how long a player has been waiting for a match; `accessed` is used to determine when a player needs to be expired out of state storage. Both are prefixed by the string `OM_METADATA`, so it should be easy to spot them.
- A call to the Frontend API `GetUpdates()` gRPC endpoint returns a stream of player messages. This is used to send updates to state storage for the `Assignment`, `Status`, and `Error` Player fields in near-realtime. **It is the responsibility of the game client to disconnect** from the stream when it has gotten the results it was waiting for! (A minimal client sketch follows these notes.)
- Moved the rest of the gRPC messages into a shared [`messages.proto` file](api/protobuf-spec/messages.proto).
- Added documentation to Frontend API gRPC calls to the [`frontend.proto` file](api/protobuf-spec/frontend.proto).
- [Issue #41](https://github.com/GoogleCloudPlatform/open-match/issues/41)|[PR #48](https://github.com/GoogleCloudPlatform/open-match/pull/48) There is now a HA Redis install available in `install/yaml/01-redis-failover.yaml`. This would be used as a drop-in replacement for a single-instance Redis configuration in `install/yaml/01-redis.yaml`. The HA configuration requires that you install the [Redis Operator](https://github.com/spotahome/redis-operator) (note: **currently alpha**, use at your own risk) in your Kubernetes cluster.
- As part of this change, the kubernetes service name is now `redis` not `redis-sentinel` to denote that it is accessed using a standard Redis client.
- Open Match uses a new feature of the [logrus](https://github.com/sirupsen/logrus) go module to include filenames and line numbers. If you have an older version in your local build environment, you may need to delete the module and `go get github.com/sirupsen/logrus` again. When building using the provided `cloudbuild.yaml` and `Dockerfile`s this is handled for you.
- The program that was formerly in `examples/frontendclient` has been expanded and moved to the `test` directory under [`test/cmd/frontendclient/`](test/cmd/frontendclient/).
- The client load generator program has been moved from `test/cmd/client` to [`test/cmd/clientloadgen/`](test/cmd/clientloadgen/) to better reflect what it does.
- [Issue #45](https://github.com/GoogleCloudPlatform/open-match/issues/45) The process for moving the build files (`Dockerfile` and `cloudbuild.yaml`) for each component, example, and test program to their respective directories and out of the repository root has started but won't be completed until a future version.
- Put some basic notes in the [production guide](docs/production.md)
- Added a basic [roadmap](docs/roadmap.md)
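The client flow these notes describe, as a minimal sketch in Go. It assumes the stubs generated from `api/protobuf-spec/frontend.proto` into `internal/pb` expose `CreatePlayer`, a server-streaming `GetUpdates`, and `DeletePlayer` as described above; the service address, port, and exact `Player` field names here are illustrative assumptions, not the authoritative API.

```go
// Minimal sketch of the v0.3.0 Frontend API client flow described above.
// Assumes stubs generated from frontend.proto/messages.proto into internal/pb;
// the address, port, and field names are illustrative assumptions.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	pb "github.com/GoogleCloudPlatform/open-match/internal/pb"
)

func main() {
	conn, err := grpc.Dial("om-frontendapi:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fe := pb.NewFrontendClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// CreatePlayer writes the player record to state storage and indexes it.
	player := &pb.Player{Id: "player-1234", Properties: `{"mmr": 1200}`}
	if _, err := fe.CreatePlayer(ctx, player); err != nil {
		log.Fatal(err)
	}

	// GetUpdates streams near-realtime changes to the Assignment, Status,
	// and Error fields of the player record.
	stream, err := fe.GetUpdates(ctx, player)
	if err != nil {
		log.Fatal(err)
	}
	for {
		update, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		if update.Assignment != "" {
			log.Printf("assigned to: %s", update.Assignment)
			break // The client must disconnect once it has its result.
		}
	}

	// DeletePlayer deindexes and removes the record (and, per the commit
	// list above, removes the player from ignorelists).
	if _, err := fe.DeletePlayer(ctx, player); err != nil {
		log.Fatal(err)
	}
}
```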
## v0.2.0 (alpha)
This is a pretty large update. Custom MMFs or evaluators from 0.1.0 may need some tweaking to work with this version. Some Backend API function arguments have changed. Please join the [Slack channel](https://open-match.slack.com/) if you need help ([Signup link](https://join.slack.com/t/open-match/shared_invite/enQtNDM1NjcxNTY4MTgzLWQzMzE1MGY5YmYyYWY3ZjE2MjNjZTdmYmQ1ZTQzMmNiNGViYmQyN2M4ZmVkMDY2YzZlOTUwMTYwMzI1Y2I2MjU))!
v0.2.0 focused on adding additional functionality to Backend API calls and on **reducing the amount of boilerplate code required to make a custom Matchmaking Function**. For this, a new internal API for use by MMFs called the [Matchmaking Logic API (MMLogic API)](README.md#matchmaking-logic-mmlogic-api) has been added. Many of the core components and examples had to be updated to use the new Backend API arguments and the modules to support them, so we recommend you rebuild and redeploy all the components to use v0.2.0.
### Release notes
- MMLogic API is now available. Deploy it to kubernetes using the [appropriate json file]() and check out the [gRPC API specification](api/protobuf-spec/mmlogic.proto) to see how to use it. To write a client against this API, you'll need to compile the protobuf files to your language of choice. There is an associated cloudbuild.yaml file and Dockerfile for it in the root directory.
- When using the MMLogic API to filter players into pools, it will attempt to report back the number of players that matched the filters and how long the filters took to query state storage.
- An [example MMF](examples/functions/python3/mmlogic-simple/harness.py) using it has been written in Python3. There is an associated cloudbuild.yaml file and Dockerfile for it in the root directory. By default the [example backend client](examples/backendclient/main.go) is now configured to use this MMF, so make sure you have it available before you try to run the latest backend client.
- An [example MMF](examples/functions/php/mmlogic-simple/harness.py) using it has been contributed by Ilya Hrankouski in PHP (thanks!).
- The [example golang MMF](examples/functions/golang/manual-simple/) has been updated to use the latest data schemas for MatchObjects, and renamed to `manual-simple` to denote that it is manually manipulating Redis, not using the MMLogic API.
- The API specs have been split into separate files per API and the protobuf messages are in a separate file. Things were renamed slightly as a result, and you will need to update your API clients. The Frontend API hasn't had its messages moved to the shared messages file yet, but this will happen in an upcoming version.
- The message model for using the Backend API has changed slightly - for calls that make MatchObjects, the expectation is that you will provide a MatchObject with a few fields populated, and it will then be shuttled along through state storage to your MMF and back out again, with various processes 'filling in the blanks' of your MatchObject, which is then returned to your code calling the Backend API. Read the [gRPC API specification](api/protobuf-spec/backend.proto) for more information. (A sketch of this flow follows these notes.)
- As part of this, compiled protobuf golang modules now live in the [`internal/pb`](internal/pb) directory. There's a handy [bash script](api/protoc-go.sh) for compiling them from the `api/protobuf-spec` directory into this new `internal/pb` directory for development in your local golang environment if you need it.
- As part of this Backend API message shift and the advent of the MMLogic API, 'player pools' and 'rosters' are now first-class data structures in MatchObjects for those who wish to use them. You can ignore them if you like, but if you want to use some of the MMLogic API calls to automate tasks for you - things like filtering a pool of players according to attributes or adding all the players in your rosters to the ignorelist so other MMFs don't try to grab them - you'll need to put your data into the [protobuf messages](api/protobuf-spec/messages.proto) so Open Match knows how to read them. The sample backend client [test profile JSON](examples/backendclient/profiles/testprofile.json) has been updated to use this format if you want to see an example.
- Rosters were formerly space-delimited lists of player IDs. They are now first-class repeated protobuf message fields in the [Roster message format](api/protobuf-spec/messages.proto). That means that in most languages, you can access the roster as a list of players using your native language data structures (more info can be found in the [guide for using protocol buffers in your language of choice](https://developers.google.com/protocol-buffers/docs/reference/overview)). If you don't care about the new fields or the new functionality, you can just leave every field except the player ID unset. (Roster access is demonstrated in the sketch after these notes.)
- Open Match is transitioning to using [protocol buffer messages](https://developers.google.com/protocol-buffers/) as its internal data format. There is now a Redis state storage [golang module](internal/statestorage/redis/redispb/) for marshaling and unmarshaling MatchObject messages to and from Redis. It isn't very clean code right now but will get worked on for the next couple releases.
- Ignorelists now exist, and have a Redis state storage [golang module](internal/statestorage/redis/ignorelist/) for CRUD access. Currently three ignorelists are defined in the [config file](config/matchmaker_config.json) with their respective parameters. These are implemented as [Sorted Sets in Redis](https://redis.io/commands#sorted_set).
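To make the Backend API MatchObject flow and the first-class Pools/Rosters concrete, here is a minimal 'Director'-style sketch in Go. It assumes stubs generated from `backend.proto`/`messages.proto` with the shapes described above (`MatchObject` carrying `Rosters` and `PlayerPool`s, `Filter` with min/max bounds); the address, port, attribute name, and filter values are illustrative assumptions.

```go
// Minimal sketch of the v0.2.0 Backend API flow described above: populate a
// few MatchObject fields, send the profile through the Backend API, and read
// the 'filled in' result. Names, address, and values are assumptions.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	pb "github.com/GoogleCloudPlatform/open-match/internal/pb"
)

func main() {
	conn, err := grpc.Dial("om-backendapi:50505", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	be := pb.NewBackendClient(conn)

	// Only a few fields are populated; MMFs and state storage 'fill in the
	// blanks' as the profile travels through the system and back.
	profile := &pb.MatchObject{
		Id:         "profile.duel",
		Properties: `{"mode": "1v1"}`,
		Pools: []*pb.PlayerPool{{
			Name: "everyone",
			Filters: []*pb.Filter{
				{Name: "mmr", Attribute: "mmr", Minv: 0, Maxv: 3000},
			},
		}},
		Rosters: []*pb.Roster{{Name: "players"}},
	}

	match, err := be.CreateMatch(context.Background(), profile)
	if err != nil {
		log.Fatal(err)
	}
	// Rosters are repeated message fields, so they read as a native list.
	for _, roster := range match.Rosters {
		for _, p := range roster.Players {
			log.Printf("roster %s: player %s", roster.Name, p.Id)
		}
	}
}
```

The ignorelists in the last note map naturally onto Redis sorted sets: the member is a player ID and the score is a timestamp, so expiry is a removal by score range. A hedged sketch using the redigo client (the key names and expiry window are assumptions, not the repo's actual configuration):

```go
// Hypothetical ignorelist helpers over Redis sorted sets, mirroring the CRUD
// module described above. Key name and window are illustrative assumptions.
package ignorelist

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

// Add marks a player as ignored (e.g. proposed in a match) at the current time.
func Add(conn redis.Conn, list, playerID string) error {
	_, err := conn.Do("ZADD", list, time.Now().Unix(), playerID)
	return err
}

// Expire drops entries older than maxAge so players become matchable again.
func Expire(conn redis.Conn, list string, maxAge time.Duration) error {
	cutoff := time.Now().Add(-maxAge).Unix()
	_, err := conn.Do("ZREMRANGEBYSCORE", list, "-inf", cutoff)
	return err
}
```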
@@ -23,10 +51,10 @@
### Roadmap
- It has become clear from talking to multiple users that the software they write to talk to the Backend API needs a name. 'Backend API Client' is technically correct, but given how many APIs are in Open Match and the overwhelming use of 'Client' to refer to a Game Client in the industry, we're currently calling this a 'Director', as its primary purpose is to 'direct' which profiles are sent to the backend, and 'direct' the resulting MatchObjects to game servers. Further discussion / suggestions are welcome.
- We'll be entering the design stage on longer-running MMFs before the end of the year. We'll get a proposal together and on the github repo as a request for comments, so please keep your eye out for that.
- Support for match profiles providing multiple MMFs to run is no longer planned. Just send multiple copies of the profile with different MMFs specified via the backendapi.
- Redis Sentinel will likely not be supported. Instead, replicated instances and HAProxy may be the HA solution of choice. There's an [outstanding issue to investigate and implement](https://github.com/GoogleCloudPlatform/open-match/issues/41) this if it fills our needs; feel free to contribute!
## v0.1.0 (alpha)
Initial release.


@@ -1,13 +0,0 @@
# Golang application builder steps
FROM golang:1.10.3 as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match
COPY cmd/backendapi cmd/backendapi
COPY config config
COPY internal internal
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/backendapi
RUN go get -d -v
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
#FROM scratch
#COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/backendapi/backendapi .
ENTRYPOINT ["./backendapi"]


@@ -2,6 +2,8 @@
FROM golang:1.10.3 as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match
COPY config config
RUN rm -f config/matchmaker_config.json
RUN rm -f config/matchmaker_config.yaml
COPY internal internal
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/internal
RUN go get -d -v ...

Dockerfile.base-build (new file)

@@ -0,0 +1,7 @@
FROM golang:latest
ENV GO111MODULE=on
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match
COPY . .
RUN go mod download


@@ -1,13 +0,0 @@
# Golang application builder steps
FROM golang:1.10.3 as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match
COPY examples/evaluators/golang/simple examples/evaluators/golang/simple
COPY config config
COPY internal internal
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/examples/evaluators/golang/simple
RUN go get -d -v
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
#FROM scratch
#COPY --from=builder /go/src/github.com/GoogleCloudPlatform/mmfstub/mmfstub mmfstub
ENTRYPOINT ["./simple"]


@@ -1,13 +0,0 @@
# Golang application builder steps
FROM golang:1.10.3 as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match
COPY cmd/frontendapi cmd/frontendapi
COPY config config
COPY internal internal
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/frontendapi
RUN go get -d -v
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
#FROM scratch
#COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/frontendapi/frontendapi .
ENTRYPOINT ["./frontendapi"]


@@ -1,12 +0,0 @@
# Golang application builder steps
# FROM golang:1.10.3 as builder
FROM gcr.io/matchmaker-dev-201405/openmatch-devbase as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match
COPY examples/functions/golang/manual-simple examples/functions/golang/manual-simple
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/examples/functions/golang/manual-simple
RUN go get -d -v
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o mmf .
#FROM scratch
#COPY --from=builder /go/src/github.com/GoogleCloudPlatform/mmfstub/mmfstub mmfstub
CMD ["./mmf"]


@@ -1,24 +0,0 @@
# Golang application builder steps
FROM golang:1.10.3 as builder
# Necessary to get a specific version of the golang k8s client
RUN go get github.com/tools/godep
RUN go get k8s.io/client-go/...
WORKDIR /go/src/k8s.io/client-go
RUN git checkout v7.0.0
RUN godep restore ./...
RUN rm -rf vendor/
RUN rm -rf /go/src/github.com/golang/protobuf/
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/
COPY cmd/mmforc cmd/mmforc
COPY config config
COPY internal internal
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/mmforc/
RUN go get -d -v
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
# Uncomment to build production images (removes all troubleshooting tools)
#FROM scratch
#COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/mmforc/mmforc .
CMD ["./mmforc"]


@@ -1,13 +0,0 @@
# Golang application builder steps
FROM golang:1.10.3 as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match
COPY cmd/mmlogicapi cmd/mmlogicapi
COPY config config
COPY internal internal
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/mmlogicapi
RUN go get -d -v
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
#FROM scratch
#COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/frontendapi/frontendapi .
ENTRYPOINT ["./mmlogicapi"]

Makefile (new file)

@@ -0,0 +1,533 @@
################################################################################
## Open Match Makefile ##
################################################################################
# Notice: There are two variables you need to make sure are set.
# GCP_PROJECT_ID if you're working against GCP.
# Or $REGISTRY if you want to use your own custom docker registry.
# Basic Deployment
# make create-gke-cluster OR make create-mini-cluster
# make push-helm
# make REGISTRY=gcr.io/$PROJECT_ID push-images -j$(nproc)
# make install-chart
#
# Generate Files
# make all-protos
#
# Building
# make all -j$(nproc)
#
# Access monitoring
# make proxy-prometheus
# make proxy-grafana
#
# Run those tools
# make run-backendclient
# make run-frontendclient
# make run-clientloadgen
#
# Teardown
# make delete-mini-cluster
# make delete-gke-cluster
#
## http://makefiletutorial.com/
BASE_VERSION = 0.4.0
VERSION_SUFFIX = $(shell git rev-parse --short=7 HEAD)
VERSION ?= $(BASE_VERSION)-$(VERSION_SUFFIX)
PROTOC_VERSION = 3.7.1
GOLANG_VERSION = 1.12.1
HELM_VERSION = 2.13.0
HUGO_VERSION = 0.54.0
KUBECTL_VERSION = 1.13.0
NODEJS_VERSION = 10.15.3
SKAFFOLD_VERSION = latest
MINIKUBE_VERSION = latest
PROTOC_RELEASE_BASE = https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)
GO = go
GO_BIN := $(GOPATH)/bin
GO_SRC := $(GOPATH)/src
GO_BUILD_COMMAND = CGO_ENABLED=0 GOOS=linux $(GO) build -a -installsuffix cgo .
BUILD_DIR = $(CURDIR)/build
TOOLCHAIN_DIR = $(BUILD_DIR)/toolchain
TOOLCHAIN_BIN = $(TOOLCHAIN_DIR)/bin
PROTOC := $(TOOLCHAIN_BIN)/protoc
PROTOC_INCLUDES := $(TOOLCHAIN_DIR)/include/
GCP_PROJECT_ID =
GCP_PROJECT_FLAG = --project=$(GCP_PROJECT_ID)
OM_SITE_GCP_PROJECT_ID = open-match-site
OM_SITE_GCP_PROJECT_FLAG = --project=$(OM_SITE_GCP_PROJECT_ID)
REGISTRY = gcr.io/$(GCP_PROJECT_ID)
TAG := $(VERSION)
ALTERNATE_TAG := dev
GKE_CLUSTER_NAME = om-cluster
GCP_REGION = us-west1
GCP_ZONE = us-west1-a
EXE_EXTENSION =
LOCAL_CLOUD_BUILD_PUSH = # --push
KUBECTL_RUN_ENV = --env='REDIS_SERVICE_HOST=$$(OPEN_MATCH_REDIS_MASTER_SERVICE_HOST)' --env='REDIS_SERVICE_PORT=$$(OPEN_MATCH_REDIS_MASTER_SERVICE_PORT)'
GCP_LOCATION_FLAG = --zone $(GCP_ZONE)
GO111MODULE = on
PROMETHEUS_PORT = 9090
GRAFANA_PORT = 3000
SITE_PORT = 8080
HELM = $(TOOLCHAIN_BIN)/helm
TILLER = $(TOOLCHAIN_BIN)/tiller
MINIKUBE = $(TOOLCHAIN_BIN)/minikube
KUBECTL = $(TOOLCHAIN_BIN)/kubectl
SERVICE = default
## Make port forwards accessible outside of the proxy machine.
PORT_FORWARD_ADDRESS_FLAG = --address 0.0.0.0
DASHBOARD_PORT = 9092
export PATH := $(CURDIR)/node_modules/.bin/:$(TOOLCHAIN_BIN):$(TOOLCHAIN_DIR)/nodejs/bin:$(PATH)
ifneq (,$(wildcard $(TOOLCHAIN_GOLANG_DIR)/bin/go))
export GO = $(CURDIR)/$(TOOLCHAIN_GOLANG_DIR)/bin/go
export GOROOT = $(CURDIR)/$(TOOLCHAIN_GOLANG_DIR)
export PATH := $(TOOLCHAIN_GOLANG_DIR):$(PATH)
endif
# Get the project from gcloud if it's not set.
ifeq ($(GCP_PROJECT_ID),)
export GCP_PROJECT_ID = $(shell gcloud config list --format 'value(core.project)')
endif
ifeq ($(OS),Windows_NT)
# TODO: Windows packages are here but things are broken since many paths are Linux based and zip vs tar.gz.
HELM_PACKAGE = https://storage.googleapis.com/kubernetes-helm/helm-v$(HELM_VERSION)-windows-amd64.zip
MINIKUBE_PACKAGE = https://storage.googleapis.com/minikube/releases/$(MINIKUBE_VERSION)/minikube-windows-amd64.exe
SKAFFOLD_PACKAGE = https://storage.googleapis.com/skaffold/releases/$(SKAFFOLD_VERSION)/skaffold-windows-amd64.exe
EXE_EXTENSION = .exe
PROTOC_PACKAGE = $(PROTOC_RELEASE_BASE)-win64.zip
GO_PACKAGE = https://storage.googleapis.com/golang/go$(GOLANG_VERSION).windows-amd64.zip
KUBECTL_PACKAGE = https://storage.googleapis.com/kubernetes-release/release/v$(KUBECTL_VERSION)/bin/windows/amd64/kubectl.exe
HUGO_PACKAGE = https://github.com/gohugoio/hugo/releases/download/v$(HUGO_VERSION)/hugo_extended_$(HUGO_VERSION)_Windows-64bit.zip
NODEJS_PACKAGE = https://nodejs.org/dist/v$(NODEJS_VERSION)/node-v$(NODEJS_VERSION)-win-x64.zip
NODEJS_PACKAGE_NAME = nodejs.zip
else
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
HELM_PACKAGE = https://storage.googleapis.com/kubernetes-helm/helm-v$(HELM_VERSION)-linux-amd64.tar.gz
MINIKUBE_PACKAGE = https://storage.googleapis.com/minikube/releases/$(MINIKUBE_VERSION)/minikube-linux-amd64
SKAFFOLD_PACKAGE = https://storage.googleapis.com/skaffold/releases/$(SKAFFOLD_VERSION)/skaffold-linux-amd64
PROTOC_PACKAGE = $(PROTOC_RELEASE_BASE)-linux-x86_64.zip
GO_PACKAGE = https://storage.googleapis.com/golang/go$(GOLANG_VERSION).linux-amd64.tar.gz
KUBECTL_PACKAGE = https://storage.googleapis.com/kubernetes-release/release/v$(KUBECTL_VERSION)/bin/linux/amd64/kubectl
HUGO_PACKAGE = https://github.com/gohugoio/hugo/releases/download/v$(HUGO_VERSION)/hugo_extended_$(HUGO_VERSION)_Linux-64bit.tar.gz
NODEJS_PACKAGE = https://nodejs.org/dist/v$(NODEJS_VERSION)/node-v$(NODEJS_VERSION)-linux-x64.tar.gz
NODEJS_PACKAGE_NAME = nodejs.tar.gz
endif
ifeq ($(UNAME_S),Darwin)
HELM_PACKAGE = https://storage.googleapis.com/kubernetes-helm/helm-v$(HELM_VERSION)-darwin-amd64.tar.gz
MINIKUBE_PACKAGE = https://storage.googleapis.com/minikube/releases/$(MINIKUBE_VERSION)/minikube-darwin-amd64
SKAFFOLD_PACKAGE = https://storage.googleapis.com/skaffold/releases/$(SKAFFOLD_VERSION)/skaffold-darwin-amd64
PROTOC_PACKAGE = $(PROTOC_RELEASE_BASE)-osx-x86_64.zip
GO_PACKAGE = https://storage.googleapis.com/golang/go$(GOLANG_VERSION).darwin-amd64.tar.gz
KUBECTL_PACKAGE = https://storage.googleapis.com/kubernetes-release/release/v$(KUBECTL_VERSION)/bin/darwin/amd64/kubectl
HUGO_PACKAGE = https://github.com/gohugoio/hugo/releases/download/v$(HUGO_VERSION)/hugo_extended_$(HUGO_VERSION)_macOS-64bit.tar.gz
NODEJS_PACKAGE = https://nodejs.org/dist/v$(NODEJS_VERSION)/node-v$(NODEJS_VERSION)-darwin-x64.tar.gz
NODEJS_PACKAGE_NAME = nodejs.tar.gz
endif
endif
help:
@cat Makefile | grep ^\# | grep -v ^\#\# | cut -c 3-
local-cloud-build:
cloud-build-local --config=cloudbuild.yaml --dryrun=false $(LOCAL_CLOUD_BUILD_PUSH) -substitutions SHORT_SHA=$(VERSION_SUFFIX) .
push-images: push-service-images push-client-images push-mmf-example-images push-evaluator-example-images
push-service-images: push-frontendapi-image push-backendapi-image push-mmforc-image push-mmlogicapi-image
push-mmf-example-images: push-mmf-cs-mmlogic-simple-image push-mmf-go-mmlogic-simple-image push-mmf-php-mmlogic-simple-image push-mmf-py3-mmlogic-simple-image
push-client-images: push-backendclient-image push-clientloadgen-image push-frontendclient-image
push-evaluator-example-images: push-evaluator-simple-image
push-frontendapi-image: build-frontendapi-image
docker push $(REGISTRY)/openmatch-frontendapi:$(TAG)
docker push $(REGISTRY)/openmatch-frontendapi:$(ALTERNATE_TAG)
push-backendapi-image: build-backendapi-image
docker push $(REGISTRY)/openmatch-backendapi:$(TAG)
docker push $(REGISTRY)/openmatch-backendapi:$(ALTERNATE_TAG)
push-mmforc-image: build-mmforc-image
docker push $(REGISTRY)/openmatch-mmforc:$(TAG)
docker push $(REGISTRY)/openmatch-mmforc:$(ALTERNATE_TAG)
push-mmlogicapi-image: build-mmlogicapi-image
docker push $(REGISTRY)/openmatch-mmlogicapi:$(TAG)
docker push $(REGISTRY)/openmatch-mmlogicapi:$(ALTERNATE_TAG)
push-mmf-cs-mmlogic-simple-image: build-mmf-cs-mmlogic-simple-image
docker push $(REGISTRY)/openmatch-mmf-cs-mmlogic-simple:$(TAG)
docker push $(REGISTRY)/openmatch-mmf-cs-mmlogic-simple:$(ALTERNATE_TAG)
push-mmf-go-mmlogic-simple-image: build-mmf-go-mmlogic-simple-image
docker push $(REGISTRY)/openmatch-mmf-go-mmlogic-simple:$(TAG)
docker push $(REGISTRY)/openmatch-mmf-go-mmlogic-simple:$(ALTERNATE_TAG)
push-mmf-php-mmlogic-simple-image: build-mmf-php-mmlogic-simple-image
docker push $(REGISTRY)/openmatch-mmf-php-mmlogic-simple:$(TAG)
docker push $(REGISTRY)/openmatch-mmf-php-mmlogic-simple:$(ALTERNATE_TAG)
push-mmf-py3-mmlogic-simple-image: build-mmf-py3-mmlogic-simple-image
docker push $(REGISTRY)/openmatch-mmf-py3-mmlogic-simple:$(TAG)
docker push $(REGISTRY)/openmatch-mmf-py3-mmlogic-simple:$(ALTERNATE_TAG)
push-backendclient-image: build-backendclient-image
docker push $(REGISTRY)/openmatch-backendclient:$(TAG)
docker push $(REGISTRY)/openmatch-backendclient:$(ALTERNATE_TAG)
push-clientloadgen-image: build-clientloadgen-image
docker push $(REGISTRY)/openmatch-clientloadgen:$(TAG)
docker push $(REGISTRY)/openmatch-clientloadgen:$(ALTERNATE_TAG)
push-frontendclient-image: build-frontendclient-image
docker push $(REGISTRY)/openmatch-frontendclient:$(TAG)
docker push $(REGISTRY)/openmatch-frontendclient:$(ALTERNATE_TAG)
push-evaluator-simple-image: build-evaluator-simple-image
docker push $(REGISTRY)/openmatch-evaluator-simple:$(TAG)
docker push $(REGISTRY)/openmatch-evaluator-simple:$(ALTERNATE_TAG)
build-images: build-service-images build-client-images build-mmf-example-images build-evaluator-example-images
build-service-images: build-frontendapi-image build-backendapi-image build-mmforc-image build-mmlogicapi-image
build-client-images: build-backendclient-image build-clientloadgen-image build-frontendclient-image
build-mmf-example-images: build-mmf-cs-mmlogic-simple-image build-mmf-go-mmlogic-simple-image build-mmf-php-mmlogic-simple-image build-mmf-py3-mmlogic-simple-image
build-evaluator-example-images: build-evaluator-simple-image
build-base-build-image:
docker build -f Dockerfile.base-build -t open-match-base-build .
build-frontendapi-image: build-base-build-image
docker build -f cmd/frontendapi/Dockerfile -t $(REGISTRY)/openmatch-frontendapi:$(TAG) -t $(REGISTRY)/openmatch-frontendapi:$(ALTERNATE_TAG) .
build-backendapi-image: build-base-build-image
docker build -f cmd/backendapi/Dockerfile -t $(REGISTRY)/openmatch-backendapi:$(TAG) -t $(REGISTRY)/openmatch-backendapi:$(ALTERNATE_TAG) .
build-mmforc-image: build-base-build-image
docker build -f cmd/mmforc/Dockerfile -t $(REGISTRY)/openmatch-mmforc:$(TAG) -t $(REGISTRY)/openmatch-mmforc:$(ALTERNATE_TAG) .
build-mmlogicapi-image: build-base-build-image
docker build -f cmd/mmlogicapi/Dockerfile -t $(REGISTRY)/openmatch-mmlogicapi:$(TAG) -t $(REGISTRY)/openmatch-mmlogicapi:$(ALTERNATE_TAG) .
build-mmf-cs-mmlogic-simple-image:
cd examples/functions/csharp/simple/ && docker build -f Dockerfile -t $(REGISTRY)/openmatch-mmf-cs-mmlogic-simple:$(TAG) -t $(REGISTRY)/openmatch-mmf-cs-mmlogic-simple:$(ALTERNATE_TAG) .
build-mmf-go-mmlogic-simple-image: build-base-build-image
docker build -f examples/functions/golang/manual-simple/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-mmlogic-simple:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-mmlogic-simple:$(ALTERNATE_TAG) .
build-mmf-php-mmlogic-simple-image:
docker build -f examples/functions/php/mmlogic-simple/Dockerfile -t $(REGISTRY)/openmatch-mmf-php-mmlogic-simple:$(TAG) -t $(REGISTRY)/openmatch-mmf-php-mmlogic-simple:$(ALTERNATE_TAG) .
build-mmf-py3-mmlogic-simple-image:
docker build -f examples/functions/python3/mmlogic-simple/Dockerfile -t $(REGISTRY)/openmatch-mmf-py3-mmlogic-simple:$(TAG) -t $(REGISTRY)/openmatch-mmf-py3-mmlogic-simple:$(ALTERNATE_TAG) .
build-backendclient-image: build-base-build-image
docker build -f examples/backendclient/Dockerfile -t $(REGISTRY)/openmatch-backendclient:$(TAG) -t $(REGISTRY)/openmatch-backendclient:$(ALTERNATE_TAG) .
build-clientloadgen-image: build-base-build-image
docker build -f test/cmd/clientloadgen/Dockerfile -t $(REGISTRY)/openmatch-clientloadgen:$(TAG) -t $(REGISTRY)/openmatch-clientloadgen:$(ALTERNATE_TAG) .
build-frontendclient-image: build-base-build-image
docker build -f test/cmd/frontendclient/Dockerfile -t $(REGISTRY)/openmatch-frontendclient:$(TAG) -t $(REGISTRY)/openmatch-frontendclient:$(ALTERNATE_TAG) .
build-evaluator-simple-image: build-base-build-image
docker build -f examples/evaluators/golang/simple/Dockerfile -t $(REGISTRY)/openmatch-evaluator-simple:$(TAG) -t $(REGISTRY)/openmatch-evaluator-simple:$(ALTERNATE_TAG) .
clean-images:
-docker rmi -f open-match-base-build
-docker rmi -f $(REGISTRY)/openmatch-frontendapi:$(TAG) $(REGISTRY)/openmatch-frontendapi:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-backendapi:$(TAG) $(REGISTRY)/openmatch-backendapi:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-mmforc:$(TAG) $(REGISTRY)/openmatch-mmforc:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-mmlogicapi:$(TAG) $(REGISTRY)/openmatch-mmlogicapi:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-mmf-cs-mmlogic-simple:$(TAG) $(REGISTRY)/openmatch-mmf-cs-mmlogic-simple:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-mmf-go-mmlogic-simple:$(TAG) $(REGISTRY)/openmatch-mmf-go-mmlogic-simple:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-mmf-php-mmlogic-simple:$(TAG) $(REGISTRY)/openmatch-mmf-php-mmlogic-simple:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-mmf-py3-mmlogic-simple:$(TAG) $(REGISTRY)/openmatch-mmf-py3-mmlogic-simple:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-backendclient:$(TAG) $(REGISTRY)/openmatch-backendclient:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-clientloadgen:$(TAG) $(REGISTRY)/openmatch-clientloadgen:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-frontendclient:$(TAG) $(REGISTRY)/openmatch-frontendclient:$(ALTERNATE_TAG)
-docker rmi -f $(REGISTRY)/openmatch-evaluator-simple:$(TAG) $(REGISTRY)/openmatch-evaluator-simple:$(ALTERNATE_TAG)
install-redis: build/toolchain/bin/helm$(EXE_EXTENSION)
$(HELM) upgrade --install --wait --debug redis stable/redis --namespace redis
chart-deps: build/toolchain/bin/helm$(EXE_EXTENSION)
(cd install/helm/open-match; $(HELM) dependency update)
print-chart: build/toolchain/bin/helm$(EXE_EXTENSION)
(cd install/helm; $(HELM) lint open-match; $(HELM) install --dry-run --debug open-match)
install-chart: build/toolchain/bin/helm$(EXE_EXTENSION)
$(HELM) upgrade --install --wait --debug open-match install/helm/open-match \
--namespace=open-match \
--set openmatch.image.registry=$(REGISTRY) \
--set openmatch.image.tag=$(TAG)
install-example-chart: build/toolchain/bin/helm$(EXE_EXTENSION)
$(HELM) upgrade --install --wait --debug open-match-example install/helm/open-match-example \
--namespace=open-match \
--set openmatch.image.registry=$(REGISTRY) \
--set openmatch.image.tag=$(TAG)
delete-example-chart: build/toolchain/bin/helm$(EXE_EXTENSION)
-$(HELM) delete --purge open-match-example
dry-chart: build/toolchain/bin/helm$(EXE_EXTENSION)
$(HELM) upgrade --install --wait --debug --dry-run open-match install/helm/open-match \
--namespace=open-match \
--set openmatch.image.registry=$(REGISTRY) \
--set openmatch.image.tag=$(TAG)
delete-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
-$(HELM) delete --purge open-match
-$(KUBECTL) delete crd prometheuses.monitoring.coreos.com
-$(KUBECTL) delete crd servicemonitors.monitoring.coreos.com
-$(KUBECTL) delete crd prometheusrules.monitoring.coreos.com
update-helm-deps:
(cd install/helm/open-match; helm dependencies update)
install-toolchain: build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION) build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/minikube$(EXE_EXTENSION) build/toolchain/bin/skaffold$(EXE_EXTENSION) build/toolchain/bin/hugo$(EXE_EXTENSION) build/toolchain/python/
build/toolchain/bin/helm$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
mkdir -p $(TOOLCHAIN_DIR)/temp-helm
cd $(TOOLCHAIN_DIR)/temp-helm && curl -Lo helm.tar.gz $(HELM_PACKAGE) && tar xvzf helm.tar.gz --strip-components 1
mv $(TOOLCHAIN_DIR)/temp-helm/helm$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/helm$(EXE_EXTENSION)
mv $(TOOLCHAIN_DIR)/temp-helm/tiller$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/tiller$(EXE_EXTENSION)
rm -rf $(TOOLCHAIN_DIR)/temp-helm/
build/toolchain/bin/hugo$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
mkdir -p $(TOOLCHAIN_DIR)/temp-hugo
cd $(TOOLCHAIN_DIR)/temp-hugo && curl -Lo hugo.tar.gz $(HUGO_PACKAGE) && tar xvzf hugo.tar.gz
mv $(TOOLCHAIN_DIR)/temp-hugo/hugo$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/hugo$(EXE_EXTENSION)
rm -rf $(TOOLCHAIN_DIR)/temp-hugo/
build/toolchain/bin/minikube$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
curl -Lo minikube$(EXE_EXTENSION) $(MINIKUBE_PACKAGE)
chmod +x minikube$(EXE_EXTENSION)
mv minikube$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/minikube$(EXE_EXTENSION)
build/toolchain/bin/kubectl$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
curl -Lo kubectl$(EXE_EXTENSION) $(KUBECTL_PACKAGE)
chmod +x kubectl$(EXE_EXTENSION)
mv kubectl$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/kubectl$(EXE_EXTENSION)
build/toolchain/bin/skaffold$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
curl -Lo skaffold$(EXE_EXTENSION) $(SKAFFOLD_PACKAGE)
chmod +x skaffold$(EXE_EXTENSION)
mv skaffold$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/skaffold$(EXE_EXTENSION)
push-helm: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) create serviceaccount --namespace kube-system tiller
$(HELM) init --service-account tiller --force-upgrade
$(KUBECTL) create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
ifneq ($(strip $(shell $(KUBECTL) get clusterroles | grep -i rbac)),)
$(KUBECTL) patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
endif
echo "Waiting for Tiller to become ready..."
$(KUBECTL) wait deployment --timeout=60s --for condition=available -l app=helm,name=tiller --namespace kube-system
delete-helm: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
-$(HELM) reset
-$(KUBECTL) delete serviceaccount --namespace kube-system tiller
-$(KUBECTL) delete clusterrolebinding tiller-cluster-rule
ifneq ($(strip $(shell $(KUBECTL) get clusterroles | grep -i rbac)),)
-$(KUBECTL) delete deployment --namespace kube-system tiller-deploy
endif
echo "Waiting for Tiller to go away..."
-$(KUBECTL) wait deployment --timeout=60s --for delete -l app=helm,name=tiller --namespace kube-system
auth-docker:
gcloud $(GCP_PROJECT_FLAG) auth configure-docker
auth-gke-cluster:
gcloud $(GCP_PROJECT_FLAG) container clusters get-credentials $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG)
create-gke-cluster:
gcloud $(GCP_PROJECT_FLAG) container clusters create $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG) --machine-type n1-standard-4 --tags open-match
delete-gke-cluster:
gcloud $(GCP_PROJECT_FLAG) container clusters delete $(GKE_CLUSTER_NAME) $(GCP_LOCATION_FLAG)
create-mini-cluster: build/toolchain/bin/minikube$(EXE_EXTENSION)
$(MINIKUBE) start --memory 6144 --cpus 4 --disk-size 50g
delete-mini-cluster: build/toolchain/bin/minikube$(EXE_EXTENSION)
$(MINIKUBE) delete
build/toolchain/python/:
mkdir -p build/toolchain/python/
virtualenv --python=python3 build/toolchain/python/
# Hack to work around some crazy bug in pip that's chopping off the python executable's name.
cd build/toolchain/python/bin && ln -s python3 pytho
cd build/toolchain/python/ && . bin/activate && pip install grpcio-tools && deactivate
build/toolchain/bin/protoc$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
curl -o $(TOOLCHAIN_DIR)/protoc-temp.zip -L $(PROTOC_PACKAGE)
(cd $(TOOLCHAIN_DIR); unzip -o protoc-temp.zip)
rm $(TOOLCHAIN_DIR)/protoc-temp.zip $(TOOLCHAIN_DIR)/readme.txt
build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION):
$(GO) get github.com/golang/protobuf/protoc-gen-go
$(GO) install github.com/golang/protobuf/protoc-gen-go
mv $(GOPATH)/bin/protoc-gen-go$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION)
all-protos: internal/pb/backend.pb.go internal/pb/frontend.pb.go internal/pb/function.pb.go internal/pb/messages.pb.go internal/pb/mmlogic.pb.go mmlogic-simple-protos
internal/pb/%.pb.go: api/protobuf-spec/%.proto build/toolchain/bin/protoc$(EXE_EXTENSION) build/toolchain/bin/protoc-gen-go$(EXE_EXTENSION)
$(PROTOC) $< \
-I $(CURDIR) -I $(PROTOC_INCLUDES) \
--go_out=plugins=grpc:$(GO_SRC)
## Include structure of the protos needs to be called out so the dependency chain is run through properly.
internal/pb/backend.pb.go: internal/pb/messages.pb.go
internal/pb/frontend.pb.go: internal/pb/messages.pb.go
internal/pb/mmlogic.pb.go: internal/pb/messages.pb.go
internal/pb/function.pb.go: internal/pb/messages.pb.go
mmlogic-simple-protos: examples/functions/python3/mmlogic-simple/api/protobuf_spec/messages_pb2.py examples/functions/python3/mmlogic-simple/api/protobuf_spec/mmlogic_pb2.py
examples/functions/python3/mmlogic-simple/api/protobuf_spec/%_pb2.py: api/protobuf-spec/%.proto build/toolchain/python/
. build/toolchain/python/bin/activate && python3 -m grpc_tools.protoc -I $(CURDIR) -I $(PROTOC_INCLUDES) --python_out=examples/functions/python3/mmlogic-simple/ --grpc_python_out=examples/functions/python3/mmlogic-simple/ $< && deactivate
internal/pb/%_pb2.py: api/protobuf-spec/%.proto build/toolchain/python/
. build/toolchain/python/bin/activate && python3 -m grpc_tools.protoc -I $(CURDIR) -I $(PROTOC_INCLUDES) --python_out=$(CURDIR) --grpc_python_out=$(CURDIR) $< && deactivate
build:
$(GO) build ./...
test:
$(GO) test ./... -race
fmt:
$(GO) fmt ./...
vet:
$(GO) vet ./...
cmd/backendapi/backendapi: internal/pb/backend.pb.go
cd cmd/backendapi; $(GO_BUILD_COMMAND)
cmd/frontendapi/frontendapi: internal/pb/frontend.pb.go
cd cmd/frontendapi; $(GO_BUILD_COMMAND)
cmd/mmforc/mmforc:
cd cmd/mmforc; $(GO_BUILD_COMMAND)
cmd/mmlogicapi/mmlogicapi: internal/pb/mmlogic.pb.go
cd cmd/mmlogicapi; $(GO_BUILD_COMMAND)
examples/backendclient/backendclient: internal/pb/backend.pb.go
cd examples/backendclient; $(GO_BUILD_COMMAND)
examples/evaluators/golang/simple/simple: internal/pb/messages.pb.go
cd examples/evaluators/golang/simple; $(GO_BUILD_COMMAND)
examples/functions/golang/manual-simple/manual-simple: internal/pb/messages.pb.go
cd examples/functions/golang/manual-simple; $(GO_BUILD_COMMAND)
test/cmd/clientloadgen/clientloadgen:
cd test/cmd/clientloadgen; $(GO_BUILD_COMMAND)
test/cmd/frontendclient/frontendclient: internal/pb/frontend.pb.go internal/pb/messages.pb.go
cd test/cmd/frontendclient; $(GO_BUILD_COMMAND)
build/archives/${NODEJS_PACKAGE_NAME}:
mkdir -p build/archives/
cd build/archives/ && curl -L -o ${NODEJS_PACKAGE_NAME} ${NODEJS_PACKAGE}
build/toolchain/nodejs/: build/archives/${NODEJS_PACKAGE_NAME}
mkdir -p build/toolchain/nodejs/
cd build/toolchain/nodejs/ && tar xvzf ../../archives/${NODEJS_PACKAGE_NAME} --strip-components 1
install-npm: build/toolchain/nodejs/
echo "{}" > package.json
$(TOOLCHAIN_DIR)/nodejs/bin/npm install postcss-cli autoprefixer
build/site/: build/toolchain/bin/hugo$(EXE_EXTENSION)
rm -rf build/site/
mkdir -p build/site/
cd site/ && ../build/toolchain/bin/hugo$(EXE_EXTENSION) --enableGitInfo --config=config.toml --source . --destination $(BUILD_DIR)/site/public/
-cp -f site/* $(BUILD_DIR)/site
#cd $(BUILD_DIR)/site && "SERVICE=$(SERVICE) envsubst < app.yaml > .app.yaml"
cp $(BUILD_DIR)/site/app.yaml $(BUILD_DIR)/site/.app.yaml
browse-site: build/site/
cd $(BUILD_DIR)/site && dev_appserver.py .app.yaml
deploy-dev-site: build/site/
cd $(BUILD_DIR)/site && gcloud $(OM_SITE_GCP_PROJECT_FLAG) app deploy .app.yaml --promote --version=$(VERSION_SUFFIX) --quiet
run-site: build/toolchain/bin/hugo$(EXE_EXTENSION)
cd site/ && ../build/toolchain/bin/hugo$(EXE_EXTENSION) server --debug --watch --enableGitInfo . --bind 0.0.0.0 --port $(SITE_PORT) --disableFastRender
all: service-binaries client-binaries example-binaries
service-binaries: cmd/backendapi/backendapi cmd/frontendapi/frontendapi cmd/mmforc/mmforc cmd/mmlogicapi/mmlogicapi
client-binaries: examples/backendclient/backendclient test/cmd/clientloadgen/clientloadgen test/cmd/frontendclient/frontendclient
example-binaries: examples/evaluators/golang/simple/simple examples/functions/golang/manual-simple/manual-simple
presubmit: fmt vet build test
clean-site:
rm -rf build/site/
clean-protos:
rm -rf internal/pb/
rm -rf api/protobuf_spec/
clean-binaries:
rm -rf cmd/backendapi/backendapi
rm -rf cmd/frontendapi/frontendapi
rm -rf cmd/mmforc/mmforc
rm -rf cmd/mmlogicapi/mmlogicapi
rm -rf examples/backendclient/backendclient
rm -rf examples/evaluators/golang/simple/simple
rm -rf examples/functions/golang/manual-simple/manual-simple
rm -rf test/cmd/clientloadgen/clientloadgen
rm -rf test/cmd/frontendclient/frontendclient
clean-toolchain:
rm -rf build/toolchain/
clean-nodejs:
rm -rf build/toolchain/nodejs/
rm -rf node_modules/
rm -rf package.json
clean: clean-images clean-binaries clean-site clean-toolchain clean-protos clean-nodejs
run-backendclient: build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) run om-backendclient --rm --restart=Never --image-pull-policy=Always -i --tty --image=$(REGISTRY)/openmatch-backendclient:$(TAG) --namespace=open-match $(KUBECTL_RUN_ENV)
run-frontendclient: build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) run om-frontendclient --rm --restart=Never --image-pull-policy=Always -i --tty --image=$(REGISTRY)/openmatch-frontendclient:$(TAG) --namespace=open-match $(KUBECTL_RUN_ENV)
run-clientloadgen: build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) run om-clientloadgen --rm --restart=Never --image-pull-policy=Always -i --tty --image=$(REGISTRY)/openmatch-clientloadgen:$(TAG) --namespace=open-match $(KUBECTL_RUN_ENV)
proxy-grafana: build/toolchain/bin/kubectl$(EXE_EXTENSION)
echo "User: admin"
echo "Password: openmatch"
$(KUBECTL) port-forward --namespace open-match $(shell $(KUBECTL) get pod --namespace open-match --selector="app=grafana,release=open-match" --output jsonpath='{.items[0].metadata.name}') $(GRAFANA_PORT):3000 $(PORT_FORWARD_ADDRESS_FLAG)
proxy-prometheus: build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) port-forward --namespace open-match $(shell $(KUBECTL) get pod --namespace open-match --selector="app=prometheus,component=server,release=open-match" --output jsonpath='{.items[0].metadata.name}') $(PROMETHEUS_PORT):9090 $(PORT_FORWARD_ADDRESS_FLAG)
proxy-dashboard: build/toolchain/bin/kubectl$(EXE_EXTENSION)
$(KUBECTL) port-forward --namespace kube-system $(shell $(KUBECTL) get pod --namespace kube-system --selector="app=kubernetes-dashboard" --output jsonpath='{.items[0].metadata.name}') $(DASHBOARD_PORT):9090 $(PORT_FORWARD_ADDRESS_FLAG)
.PHONY: proxy-dashboard proxy-prometheus proxy-grafana clean clean-toolchain clean-binaries clean-protos presubmit test vet

README.md

@ -1,6 +1,10 @@
# Open Match
Open Match is an open source game matchmaking framework designed to allow game creators to re-use a common matchmaker framework. It's designed to be flexible (run it anywhere Kubernetes runs), extensible (match logic can be customized to work for any game), and scalable.
[![GoDoc](https://godoc.org/github.com/GoogleCloudPlatform/open-match?status.svg)](https://godoc.org/github.com/GoogleCloudPlatform/open-match)
[![Go Report Card](https://goreportcard.com/badge/github.com/GoogleCloudPlatform/open-match)](https://goreportcard.com/report/github.com/GoogleCloudPlatform/open-match)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/GoogleCloudPlatform/open-match/blob/master/LICENSE)
Open Match is an open source game matchmaking framework designed to allow game creators to build matchmakers of any size easily and with as much possibility for sharing and code re-use as possible. It's designed to be flexible (run it anywhere Kubernetes runs), extensible (match logic can be customized to work for any game), and scalable.
Matchmaking is a complicated process, and when large player populations are involved, many popular matchmaking approaches touch on significant areas of computer science including graph theory and massively concurrent processing. Open Match is an effort to provide a foundation upon which these difficult problems can be addressed by the wider game development community. As Josh Menke &mdash; famous for working on matchmaking for many popular triple-A franchises &mdash; put it:
@ -12,7 +16,8 @@ This project attempts to solve the networking and plumbing problems, so game dev
This software is currently alpha, and subject to change. Although Open Match has already been used to run [production workloads within Google](https://cloud.google.com/blog/topics/inside-google-cloud/no-tricks-just-treats-globally-scaling-the-halloween-multiplayer-doodle-with-open-match-on-google-cloud), it's still early days on the way to our final goal. There's plenty left to write and we welcome contributions. **We strongly encourage you to engage with the community through the [Slack or Mailing lists](#get-involved) if you're considering using Open Match in production before the 1.0 release, as the documentation is likely to lag behind the latest version a bit while we focus on getting out of alpha/beta as soon as possible.**
## Version
[The current stable version in master is 0.2.0 (alpha)](https://github.com/GoogleCloudPlatform/open-match/releases/tag/020).
[The current stable version in master is 0.3.1 (alpha)](https://github.com/GoogleCloudPlatform/open-match/releases/tag/v0.3.1-alpha). At this time only bugfixes and doc update pull requests will be considered.
Version 0.4.0 is in active development; please target code changes to the 040wip branch.
# Core Concepts
@ -22,20 +27,33 @@ Open Match is designed to support massively concurrent matchmaking, and to be sc
## Glossary
* **MMF** &mdash; Matchmaking function. This is the customizable matchmaking logic.
* **Component** &mdash; One of the discrete processes in an Open Match deployment. Open Match is composed of multiple scalable microservices called 'components'.
* **Roster** &mdash; A list of all the players in a match.
* **Profile** &mdash; The json blob containing all the parameters used to select which players go into a roster.
* **Match Object** &mdash; A protobuffer message format that contains the Profile and the results of the matchmaking function. Sent to the backend API from your game backend with an empty roster and then returned from your MMF with the matchmaking results filled in.
* **MMFOrc** &mdash; Matchmaker function orchestrator. This Open Match core component is in charge of kicking off custom matchmaking functions (MMFs) and evaluator processes.
* **State Storage** &mdash; The storage software used by Open Match to hold all the matchmaking state. Open Match ships with [Redis](https://redis.io/) as the default state storage.
* **Assignment** &mdash; Refers to assigning a player or group of players to a dedicated game server instance. Open Match offers a path to send dedicated game server connection details from your backend to your game clients after a match has been made.
### General
* **DGS** &mdash; Dedicated game server
* **Client** &mdash; The game client program the player uses when playing the game
* **Session** &mdash; In Open Match, players are matched together, then assigned to a server which hosts the game _session_. Depending on context, this may be referred to as a _match_, _map_, or just _game_ elsewhere in the industry.
### Open Match
* **Component** &mdash; One of the discrete processes in an Open Match deployment. Open Match is composed of multiple scalable microservices called _components_.
* **State Storage** &mdash; The storage software used by Open Match to hold all the matchmaking state. Open Match ships with [Redis](https://redis.io/) as the default state storage.
* **MMFOrc** &mdash; Matchmaker function orchestrator. This Open Match core component is in charge of kicking off custom matchmaking functions (MMFs) and evaluator processes.
* **MMF** &mdash; Matchmaking function. This is the customizable matchmaking logic.
* **MMLogic API** &mdash; An API that provides MMF SDK functionality. It is optional - you can also do all the state storage read and write operations yourself if you have a good reason to do so.
* **Director** &mdash; The software you (as a developer) write against the Open Match Backend API. The _Director_ decides which MMFs to run, and is responsible for sending MMF results to a DGS to host the session.
### Data Model
* **Player** &mdash; An ID and list of attributes with values for a player who wants to participate in matchmaking.
* **Roster** &mdash; A list of player objects. Used to hold all the players on a single team.
* **Filter** &mdash; A _filter_ is used to narrow down the players to only those who have an attribute value within a certain integer range. All attributes are integer values in Open Match because [that is how indices are implemented](internal/statestorage/redis/playerindices/playerindices.go). A _filter_ is defined in a _player pool_.
* **Player Pool** &mdash; A list of all the players who fit all the _filters_ defined in the pool.
* **Match Object** &mdash; A protobuffer message format that contains the _profile_ and the results of the matchmaking function. Sent to the backend API from your game backend with the _roster_(s) empty and then returned from your MMF with the matchmaking results filled in.
* **Profile** &mdash; The json blob containing all the parameters used by your MMF to select which players go into a roster together.
* **Assignment** &mdash; Refers to assigning a player or group of players to a dedicated game server instance. Open Match offers a path to send dedicated game server connection details from your backend to your game clients after a match has been made.
* **Ignore List** &mdash; Removing players from matchmaking consideration is accomplished using _ignore lists_. They contain lists of player IDs that your MMF should not include when making matches.
## Requirements
* [Kubernetes](https://kubernetes.io/) cluster &mdash; tested with version 1.9.
* [Kubernetes](https://kubernetes.io/) cluster &mdash; tested with version 1.11.7.
* [Redis 4+](https://redis.io/) &mdash; tested with 4.0.11.
* Open Match is compiled against the latest release of [Golang](https://golang.org/) &mdash; tested with 1.10.3.
* Open Match is compiled against the latest release of [Golang](https://golang.org/) &mdash; tested with 1.11.5.
## Components
@ -43,15 +61,17 @@ Open Match is a set of processes designed to run on Kubernetes. It contains thes
1. Frontend API
1. Backend API
1. Matchmaker Function Orchestrator (MMFOrc)
1. Matchmaker Function Orchestrator (MMFOrc) (may be deprecated in future versions)
It includes these **optional** (but recommended) components:
1. Matchmaking Logic (MMLogic) API
It also explicitly depends on these two **customizable** components.
1. Matchmaking "Function" (MMF)
1. Evaluator (may be deprecated in future versions)
1. Evaluator (may be optional in future versions)
While **core** components are fully open source and *can* be modified, they are designed to support the majority of matchmaking scenarios *without the need to change the source code*. The Open Match repository ships with simple **customizable** example MMF and Evaluator processes, but it is expected that most users will want full control over the logic in these, so they have been designed to be as easy to modify or replace as possible.
While **core** components are fully open source and _can_ be modified, they are designed to support the majority of matchmaking scenarios *without the need to change the source code*. The Open Match repository ships with simple **customizable** MMF and Evaluator examples, but it is expected that most users will want full control over the logic in these, so they have been designed to be as easy to modify or replace as possible.
### Frontend API
@ -65,18 +85,20 @@ The client is expected to maintain a connection, waiting for an update from the
### Backend API
The Backend API puts match profiles in state storage which the Matchmaking Function (MMF) can access and use to decide which players should be put into a match together, then return those matches to dedicated game server instances.
The Backend API writes match objects to state storage which the Matchmaking Functions (MMFs) access to decide which players should be matched. It returns the results from those MMFs.
The Backend API is a server application that implements the [gRPC](https://grpc.io/) service defined in `api/protobuf-spec/backend.proto`. At the most basic level, it expects to be connected to your online infrastructure (probably to your server scaling manager or scheduler, or even directly to a dedicated game server), and to receive:
The Backend API is a server application that implements the [gRPC](https://grpc.io/) service defined in `api/protobuf-spec/backend.proto`. At the most basic level, it expects to be connected to your online infrastructure (probably to your server scaling manager or **director**, or even directly to a dedicated game server), and to receive:
* A **unique ID** for a matchmaking profile.
* A **json blob** containing all the match-related data you want to use in your matchmaking function, in an 'empty' match object.
* A **json blob** containing all the matching-related data and filters you want to use in your matchmaking function.
* An optional list of **roster**s to hold the resulting teams chosen by your matchmaking function.
* An optional set of **filters** that define player pools your matchmaking function will choose players from.
Your game backend is expected to maintain a connection, waiting for 'filled' match objects containing a roster of players. The Backend API also provides a return path for your game backend to return dedicated game server connection details (an 'assignment') to the game client, and to delete these 'assignments'.
Your game backend is expected to maintain a connection, waiting for 'filled' match objects containing a roster of players. The Backend API also provides a return path for your game backend to return dedicated game server connection details (an 'assignment') to the game client, and to delete these 'assignments'.
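To make this flow concrete, here is a minimal sketch of a "director" driving the Backend API, assuming the generated packages in `internal/pb`. The address, port, profile contents, and DGS connection string are illustrative, not values from this repository.

```go
package main

import (
	"context"
	"io"
	"log"

	"github.com/GoogleCloudPlatform/open-match/internal/pb"
	"google.golang.org/grpc"
)

func main() {
	// Illustrative address; point this at your backendapi service.
	conn, err := grpc.Dial("om-backendapi:50505", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	be := pb.NewBackendClient(conn)

	// Submit a match object holding the profile (with empty rosters),
	// then stream back 'filled' match objects.
	stream, err := be.ListMatches(context.Background(), &pb.MatchObject{
		Id:         "profile.example.1",                     // illustrative profile ID
		Properties: `{"pool":{"mmr":{"min":0,"max":3000}}}`, // illustrative JSON blob
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		match, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// Hand the rosters to a DGS, then return its connection string
		// to the players via CreateAssignments.
		_, err = be.CreateAssignments(context.Background(), &pb.Assignments{
			Rosters:    match.Rosters,
			Assignment: "1.2.3.4:7777", // illustrative DGS connection string
		})
		if err != nil {
			log.Fatal(err)
		}
	}
}
```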
### Matchmaking Function Orchestrator (MMFOrc)
The MMFOrc kicks off your custom matchmaking function (MMF) for every profile submitted to the Backend API. It also runs the Evaluator to resolve conflicts in case more than one of your profiles matched the same players.
The MMFOrc kicks off your custom matchmaking function (MMF) for every unique profile submitted to the Backend API in a match object. It also runs the Evaluator to resolve conflicts in case more than one of your profiles matched the same players.
The MMFOrc exists to orchestrate/schedule your **custom components**, running them as often as required to meet the demands of your game. MMFOrc runs in an endless loop, submitting MMFs and Evaluator jobs to Kubernetes.
@ -85,8 +107,8 @@ The MMFOrc exists to orchestrate/schedule your **custom components**, running th
The MMLogic API provides a series of gRPC functions that act as a Matchmaking Function SDK. Much of the basic, boilerplate code for an MMF is the same regardless of what players you want to match together. The MMLogic API offers a gRPC interface for many common MMF tasks, such as:
1. Reading a profile from state storage.
1. Running filters on players in state storage.
1. Removing chosen players from consideration by other MMFs (by adding them to an ignore list).
1. Running filters on players in state storage. It automatically removes players on ignore lists as well!
1. Removing chosen players from consideration by other MMFs (by adding them to an ignore list). It does this automatically for you when writing your results!
1. Writing the matchmaking results to state storage.
1. (Optional, NYI) Exporting MMF stats for metrics collection.
@ -96,9 +118,9 @@ More details about the available gRPC calls can be found in the [API Specificati
### Evaluator
The Evaluator resolves conflicts when multiple matches want to include the same player(s).
The Evaluator resolves conflicts when multiple MMFs select the same player(s).
The Evaluator is a component run by the Matchmaker Function Orchestrator (MMFOrc) after the matchmaker functions have been run, and some proposed results are available. The Evaluator looks at all the proposed matches, and if multiple proposals contain the same player(s), it breaks the tie. In many simple matchmaking setups with only a few game modes and matchmaking functions that always look at different parts of the matchmaking pool, the Evaluator may functionally be a no-op or first-in-first-out algorithm. In complex matchmaking setups where, for example, a player can queue for multiple types of matches, the Evaluator provides the critical customizability to evaluate all available proposals and approve those that will be passed to your game servers.
The Evaluator is a component run by the Matchmaker Function Orchestrator (MMFOrc) after the matchmaker functions have been run, and some proposed results are available. The Evaluator looks at all the proposals, and if multiple proposals contain the same player(s), it breaks the tie. In many simple matchmaking setups with only a few game modes and well-tuned matchmaking functions, the Evaluator may functionally be a no-op or first-in-first-out algorithm. In complex matchmaking setups where, for example, a player can queue for multiple types of matches, the Evaluator provides the critical customizability to evaluate all available proposals and approve those that will be passed to your game servers.
Large-scale concurrent matchmaking is a complex topic, and users who wish to do this are encouraged to engage with the [Open Match community](https://github.com/GoogleCloudPlatform/open-match#get-involved) about patterns and best practices.
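As a toy illustration of the first-in-first-out behavior mentioned above (a sketch, not code from this repository): proposals are approved in arrival order, and any proposal that reuses an already-approved player is rejected.

```go
package main

import "fmt"

type proposal struct {
	id      string
	players []string
}

// evaluateFIFO approves proposals in arrival order, skipping any proposal
// that contains a player already claimed by an earlier proposal.
func evaluateFIFO(proposals []proposal) []proposal {
	seen := map[string]bool{}
	var approved []proposal
	for _, p := range proposals {
		conflict := false
		for _, pl := range p.players {
			if seen[pl] {
				conflict = true
				break
			}
		}
		if conflict {
			continue // a later MMF run can pick these players up again
		}
		for _, pl := range p.players {
			seen[pl] = true
		}
		approved = append(approved, p)
	}
	return approved
}

func main() {
	got := evaluateFIFO([]proposal{
		{id: "A", players: []string{"p1", "p2"}},
		{id: "B", players: []string{"p2", "p3"}}, // conflicts with A on p2
		{id: "C", players: []string{"p4", "p5"}},
	})
	for _, p := range got {
		fmt.Println("approved", p.id)
	}
}
```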
@ -109,10 +131,10 @@ Matchmaking Functions (MMFs) are run by the Matchmaker Function Orchestrator (MM
- [x] Be packaged in a (Linux) Docker container.
- [x] Read/write from the Open Match state storage &mdash; Open Match ships with Redis as the default state storage.
- [x] Read a profile you wrote to state storage using the Backend API.
- [x] Select from the player data you wrote to state storage using the Frontend API.
- [x] Select from the player data you wrote to state storage using the Frontend API. It must respect all the ignore lists defined in the matchmaker config.
- [ ] Run your custom logic to try to find a match.
- [x] Write the match object it creates to state storage at a specified key.
- [x] Remove the players it selected from consideration by other MMFs.
- [x] Remove the players it selected from consideration by other MMFs by adding them to the appropriate ignore list.
- [x] Notify the MMFOrc of completion.
- [x] (Optional, but recommended) Export stats for metrics collection.
@ -128,7 +150,7 @@ Example MMFs are provided in these languages:
### Structured logging
Logging for Open Match uses the [Golang logrus module](https://github.com/sirupsen/logrus) to provide structured logs. Logs are output to `stdout` in each component, as expected by Docker and Kubernetes. If you have a specific log aggregator as your final destination, we recommend you have a look at the logrus documentation as there is probably a log formatter that plays nicely with your stack.
Logging for Open Match uses the [Golang logrus module](https://github.com/sirupsen/logrus) to provide structured logs. Logs are output to `stdout` in each component, as expected by Docker and Kubernetes. Level and format are configurable via config/matchmaker_config.json. If you have a specific log aggregator as your final destination, we recommend you have a look at the logrus documentation as there is probably a log formatter that plays nicely with your stack.
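As a concrete illustration of this pattern, here is a minimal sketch using the same `logrus` fields the components set up in their `main.go` files; the JSON formatter and the log messages are illustrative assumptions, not the components' exact configuration.

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Assumption: JSON output plays nicely with most log aggregators.
	log.SetFormatter(&log.JSONFormatter{})
	// Logs go to stdout, as Docker and Kubernetes expect.
	log.SetOutput(os.Stdout)

	// Structured fields, mirroring those used by the Open Match components.
	logger := log.WithFields(log.Fields{
		"app":       "openmatch",
		"component": "backend",
	})
	logger.Info("Backend API server started")
	logger.WithField("error", "example error text").Error("Unable to load config file")
}
```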
### Instrumentation for metrics
@ -140,7 +162,7 @@ Open Match uses [OpenCensus](https://opencensus.io/) for metrics instrumentation
By default, Open Match expects you to run Redis *somewhere*. Connection information can be put in the config file (`matchmaker_config.json`) for any Redis instance reachable from the [Kubernetes namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). By default, Open Match sensibly runs in the Kubernetes `default` namespace. In most instances, we expect users will run a copy of Redis in a pod in Kubernetes, with a service pointing to it.
* HA configurations for Redis aren't implemented by the provided Kubernetes resource definition files, but Open Match expects the Redis service to be named `redis-sentinel`, which provides an easier path to multi-instance deployments.
* HA configurations for Redis aren't implemented by the provided Kubernetes resource definition files, but Open Match expects the Redis service to be named `redis`, which provides an easier path to multi-instance deployments.
## Additional examples
@ -148,7 +170,7 @@ By default, Open Match expects you to run Redis *somewhere*. Connection informat
The following examples of how to call the APIs are provided in the repository. Both have a `Dockerfile` and `cloudbuild.yaml` files in their respective directories:
* `examples/frontendclient/main.go` acts as a client to the Frontend API, putting a player into the queue with simulated latencies from major metropolitan cities and a couple of other matchmaking attributes. It then waits for you to manually put a value in Redis to simulate a server connection string being written using the backend API 'CreateAssignments' call, and displays that value on stdout for you to verify.
* `test/cmd/frontendclient/main.go` acts as a client to the Frontend API, putting a player into the queue with simulated latencies from major metropolitan cities and a couple of other matchmaking attributes. It then waits for you to manually put a value in Redis to simulate a server connection string being written using the backend API 'CreateAssignments' call, and displays that value on stdout for you to verify.
* `examples/backendclient/main.go` calls the Backend API and passes in the profile found in `backendstub/profiles/testprofile.json` to the `ListMatches` API endpoint, then continually prints the results until you exit, or there are insufficient players to make a match based on the profile.
## Usage
@ -161,18 +183,83 @@ Once we reach a 1.0 release, we plan to produce publicly available (Linux) Docke
### Compiling from source
All components of Open Match produce (Linux) Docker container images as artifacts, and there are included `Dockerfile`s for each. [Google Cloud Platform Cloud Build](https://cloud.google.com/cloud-build/docs/) users will also find `cloudbuild_COMPONENT.yaml` files for each component in the repository root.
The easiest way to build Open Match is to use the Makefile. Before you can use the Makefile, make sure you have the following dependencies:
```bash
# Install Open Match Toolchain Dependencies (Debian; other OSes, including Mac OS X, have similar dependencies)
sudo apt-get update; sudo apt-get install -y -q python3 python3-virtualenv virtualenv make google-cloud-sdk git unzip tar
# Set up your repository like a Go workspace, https://golang.org/doc/code.html#Workspaces
# This requirement will go away soon.
mkdir -p workspace/src/github.com/GoogleCloudPlatform/
cd workspace/src/github.com/GoogleCloudPlatform/
export GOPATH=$HOME/workspace
export GO111MODULE=on
git clone https://github.com/GoogleCloudPlatform/open-match.git
cd open-match
```
[Docker](https://docs.docker.com/install/) and [Go 1.11+](https://golang.org/dl/) are also required. If your distro is new enough you can probably run `sudo apt-get install -y golang` or download the newest version from https://golang.org/.
To build all the artifacts of Open Match you can simply run the following commands.
```bash
# Downloads all the tools needed to build Open Match
make install-toolchain
# Generates protocol buffer code files
make all-protos
# Builds all the binaries
make all
# Builds all the images.
make build-images
```
Once built, you can use a command like `docker images` to see all the images that were built.
Before creating a pull request you can run `make local-cloud-build` to simulate a Cloud Build run to check for regressions.
The directory structure is a typical Go structure, so if you do the following, you should be able to work on this project within your IDE.
```bash
cd $GOPATH
mkdir -p src/github.com/GoogleCloudPlatform/
cd src/github.com/GoogleCloudPlatform/
# If you're going to contribute you'll want to fork open-match, see CONTRIBUTING.md for details.
git clone https://github.com/GoogleCloudPlatform/open-match.git
cd open-match
# Open IDE in this directory.
```
Lastly, this project uses Go modules, so you'll want to set `export GO111MODULE=on` before building.
## Zero to Open Match
To deploy Open Match quickly to a Kubernetes cluster run these commands.
```bash
# Downloads all the tools.
make install-toolchain
# Create a GKE Cluster
make create-gke-cluster
# OR Create a Minikube Cluster
make create-mini-cluster
# Install Helm
make push-helm
# Build and push images
make push-images -j4
# Deploy Open Match with example functions
make install-chart install-example-chart
```
## Docker Image Builds
All the core components for Open Match are written in Golang and use the [Dockerfile multistage builder pattern](https://docs.docker.com/develop/develop-images/multistage-build/). This pattern uses intermediate Docker containers as a Golang build environment while producing lightweight, minimized container images as final build artifacts. When the project is ready for production, we will modify the `Dockerfile`s to uncomment the last build stage. Although this pattern is great for production container images, it removes most of the utilities required to troubleshoot issues during development.
### Configuration
## Configuration
Currently, each component reads a local config file `matchmaker_config.json`, and all components assume they have the same configuration. To this end, there is a single centralized config file located in `<REPO_ROOT>/config/`, which is symlinked to each component's subdirectory for convenience when building locally. When `docker build`ing the component container images, the Dockerfile copies the centralized config file into the component directory.
We plan to replace this with a Kubernetes-managed config with dynamic reloading when development time allows. Pull requests are welcome!
We plan to replace this with a Kubernetes-managed config with dynamic reloading; please join the discussion in [Issue #42](issues/42).
### Guides
* [Production guide](./docs/production.md) Lots of best practices to be written here before 1.0 release. **WIP**
* [Production guide](./docs/production.md) Lots of best practices to be written here before 1.0 release, right now it's a scattered collection of notes. **WIP**
* [Development guide](./docs/development.md)
### Reference
@ -213,8 +300,8 @@ Open Match is in active development - we would love your help in shaping its fut
Apache 2.0
# Planned improvements
See the [provisional roadmap](docs/roadmap.md) for more information on upcoming releases.
## Documentation
- [ ] “Writing your first matchmaker” getting started guide will be included in an upcoming version.
@ -222,25 +309,27 @@ Apache 2.0
- [ ] Documentation on release process and release calendar.
## State storage
- [ ] All state storage operations should be isolated from core components into the `statestorage/` modules. This is necessary precursor work to enabling Open Match state storage to use software other than Redis.
- [ ] [The Redis deployment should have an example HA configuration](https://github.com/GoogleCloudPlatform/open-match/issues/41)
- [ ] Redis watch should be unified to watch a hash and stream updates. The code for this is written and validated but not committed yet. We don't want to support two redis watcher code paths, so the backend watch of the match object should be switched to unify the way the frontend and backend watch keys. The backend part of this is in but the frontend part is in another branch and will be committed later.
- [ ] Player/Group records generated when a client enters the matchmaking pool need to be removed after a certain amount of time with no activity. When using Redis, this will be implemented as an expiration on the player record.
- [X] All state storage operations should be isolated from core components into the `statestorage/` modules. This is necessary precursor work to enabling Open Match state storage to use software other than Redis.
- [X] [The Redis deployment should have an example HA configuration](https://github.com/GoogleCloudPlatform/open-match/issues/41)
- [X] Redis watch should be unified to watch a hash and stream updates. The code for this is written and validated but not committed yet.
- [ ] We don't want to support two redis watcher code paths, but we will until golang protobuf reflection is a bit more usable. [Design doc](https://docs.google.com/document/d/19kfhro7-CnBdFqFk7l4_HmwaH2JT_Rhw5-2FLWLEGGk/edit#heading=h.q3iwtwhfujjx), [github issue](https://github.com/golang/protobuf/issues/364)
- [X] Player/Group records generated when a client enters the matchmaking pool need to be removed after a certain amount of time with no activity. When using Redis, this will be implemented as an expiration on the player record.
## Instrumentation / Metrics / Analytics
- [ ] Instrumentation of MMFs is in the planning stages. Since MMFs are by design meant to be completely customizable (to the point of allowing any process that can be packaged in a Docker container), metrics/stats will need to have an expected format and formalized outgoing pathway. Currently the thought is that the metrics should be written to a particular key in state storage in a format compatible with OpenCensus, and will be collected, aggregated, and exported to Prometheus using another process.
- [ ] [OpenCensus tracing](https://opencensus.io/core-concepts/tracing/) will be implemented in an upcoming version.
- [ ] Read logrus logging configuration from matchmaker_config.json.
- [ ] [OpenCensus tracing](https://opencensus.io/core-concepts/tracing/) will be implemented in an upcoming version. This is likely going to require knative.
- [X] Read logrus logging configuration from matchmaker_config.json.
## Security
- [ ] The Kubernetes service account used by the MMFOrc should be updated to have min required permissions.
- [ ] The Kubernetes service account used by the MMFOrc should be updated to have min required permissions. [Issue 52](issues/52)
## Kubernetes
- [ ] Autoscaling isn't turned on for the Frontend or Backend API Kubernetes deployments by default.
- [ ] A [Helm](https://helm.sh/) chart to stand up Open Match will be provided in an upcoming version. For now just use the [installation YAMLs](./install/yaml).
- [ ] A [Helm](https://helm.sh/) chart to stand up Open Match may be provided in an upcoming version. For now just use the [installation YAMLs](./install/yaml).
- [ ] A knative-based implementation of MMFs is in the planning stages.
## CI / CD / Build
- [ ] We plan to host 'official' docker images for all release versions of the core components in publicly available docker registries soon.
- [ ] We plan to host 'official' docker images for all release versions of the core components in publicly available docker registries soon. This is tracked in [Issue #45](issues/45) and is blocked by [Issue 42](issues/42).
- [ ] CI/CD for this repo and the associated status tags are planned.
- [ ] Golang unit tests will be shipped in an upcoming version.
- [ ] A full load-testing and e2e testing suite will be included in an upcoming version.

@ -4,12 +4,9 @@ This directory contains the API specification files for Open Match. API documena
* [Protobuf .proto files for all APIs](./protobuf-spec/)
These proto files are copied to the container image during `docker build` for the Open Match core components. The `Dockerfiles` handle the compilation for you transparently, and copy the resulting `SPEC.pb.go` files to the appropriate place in your final container image.
References:
* [gRPC](https://grpc.io/)
* [Language Guide (proto3)](https://developers.google.com/protocol-buffers/docs/proto3)
Manual gRPC compilation command, from the directory containing the proto:
```protoc -I . ./<filename>.proto --go_out=plugins=grpc:.```
If you want to regenerate the golang gRPC modules (for local Open Match core component development, for example), the `protoc_go.sh` file in this directory may be of use to you!

@ -21,35 +21,35 @@ service Backend {
// - rosters, if you choose to fill them in your MMF. (Recommended)
// - pools, if you used the MMLogicAPI in your MMF. (Recommended, and provides stats)
rpc CreateMatch(messages.MatchObject) returns (messages.MatchObject) {}
// Continually run MMF and stream matchobjects that fit this profile until
// client closes the connection. Same inputs/outputs as CreateMatch.
// Continually run MMF and stream MatchObjects that fit this profile until
// the backend client closes the connection. Same inputs/outputs as CreateMatch.
rpc ListMatches(messages.MatchObject) returns (stream messages.MatchObject) {}
// Delete a matchobject from state storage manually. (Matchobjects in state
// storage will also automatically expire after a while)
// Delete a MatchObject from state storage manually. (MatchObjects in state
// storage will also automatically expire after a while, defined in the config)
// INPUT: MatchObject message with the 'id' field populated.
// (All other fields are ignored.)
rpc DeleteMatch(messages.MatchObject) returns (messages.Result) {}
// Call fors communication of connection info to players.
// Calls for communication of connection info to players.
// Write the connection info for the list of players in the
// Assignments.messages.Rosters to state storage. The FrontendAPI is
// Assignments.messages.Rosters to state storage. The Frontend API is
// responsible for sending anything sent here to the game clients.
// Sending a player to this function kicks off a process that removes
// the player from future matchmaking functions by adding them to the
// 'deindexed' player list and then deleting their player ID from state storage
// indexes.
// INPUT: Assignments message with these fields populated:
// - connection_info, anything you write to this string is sent to Frontend API
// - assignment, anything you write to this string is sent to Frontend API
// - rosters. You can send any number of rosters, containing any number of
// player messages. All players from all rosters will be sent the connection_info.
// The only field in the Player object that is used by CreateAssignments is
// the id field. All others are silently ignored.
// player messages. All players from all rosters will be sent the assignment.
// The only field in the Roster's Player messages used by CreateAssignments is
// the id field. All other fields in the Player messages are silently ignored.
rpc CreateAssignments(messages.Assignments) returns (messages.Result) {}
// Remove DGS connection info from state storage for players.
// INPUT: Roster message with the 'players' field populated.
// The only field in the Player object that is used by
// The only field in the Roster's Player messages used by
// DeleteAssignments is the 'id' field. All others are silently ignored. If
// you need to delete multiple rosters, make multiple calls.
rpc DeleteAssignments(messages.Roster) returns (messages.Result) {}

@ -1,23 +1,65 @@
// TODO: In a future version, these messages will be moved/merged with those in om_messages.proto
syntax = 'proto3';
package api;
option go_package = "github.com/GoogleCloudPlatform/open-match/internal/pb";
import 'api/protobuf-spec/messages.proto';
service Frontend {
rpc CreateRequest(Group) returns (messages.Result) {}
rpc DeleteRequest(Group) returns (messages.Result) {}
rpc GetAssignment(PlayerId) returns (messages.ConnectionInfo) {}
rpc DeleteAssignment(PlayerId) returns (messages.Result) {}
}
// Call to start matchmaking for a player
// Data structure for a group of players to pass to the matchmaking function.
// Obviously, the group can be a group of one!
message Group{
string id = 1; // By convention, string of space-delimited playerIDs
string properties = 2; // By convention, a JSON-encoded string
}
// CreatePlayer will put the player in state storage, and then look
// through the 'properties' field for the attributes you have defined as
// indices in your matchmaker config. If the attributes exist and are valid
// integers, they will be indexed.
// INPUT: Player message with these fields populated:
// - id
// - properties
// OUTPUT: Result message denoting success or failure (and an error if
// necessary)
rpc CreatePlayer(messages.Player) returns (messages.Result) {}
message PlayerId {
string id = 1; // By convention, a UUID
// Call to stop matchmaking for a player
// DeletePlayer removes the player from state storage by doing the
// following:
// 1) Delete player from configured indices. This effectively removes the
// player from matchmaking when using recommended MMF patterns.
// Everything after this is just cleanup to save state storage space.
// 2) 'Lazily' delete the player's state storage record. This is kicked
// off in the background and may take some time to complete.
// 3) 'Lazily' delete the player's metadata indices (like the timestamp when
// they called CreatePlayer, and the last time the record was accessed). This
// is also kicked off in the background and may take some time to complete.
// INPUT: Player message with the 'id' field populated.
// OUTPUT: Result message denoting success or failure (and an error if
// necessary)
rpc DeletePlayer(messages.Player) returns (messages.Result) {}
// Calls to access matchmaking results for a player
// GetUpdates streams matchmaking results from Open Match for the
// provided player ID.
// INPUT: Player message with the 'id' field populated.
// OUTPUT: a stream of player objects with one or more of the following
// fields populated, if an update to that field is seen in state storage:
// - 'assignment': string that usually contains game server connection information.
// - 'status': string to communicate current matchmaking status to the client.
// - 'error': string to pass along error information to the client.
//
// During normal operation, the expectation is that the 'assignment' field
// will be updated by a Backend process calling the 'CreateAssignments' Backend API
// endpoint. 'Status' and 'Error' are free for developers to use as they see fit.
// Even if you had multiple players enter a matchmaking request as a group, the
// Backend API 'CreateAssignments' call will write the results to state
// storage separately under each player's ID. OM expects each game client
// to call 'GetUpdates' with its own ID from the Frontend API to get
// its results.
//
// NOTE: This call generates a small amount of load on the Frontend API and state
// storage while watching the player record for updates. You are expected
// to close the stream from your client after receiving your matchmaking
// results (or a reasonable timeout), or you will continue to
// generate load on OM until you do!
// NOTE: Just bear in mind that every update will send egress traffic from
// Open Match to game clients! Frugality is recommended.
rpc GetUpdates(messages.Player) returns (stream messages.Player) {}
}
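A minimal sketch of a game client driving this service, assuming the generated packages in `internal/pb`; the address, port, player ID, and properties are illustrative.

```go
package main

import (
	"context"
	"log"

	"github.com/GoogleCloudPlatform/open-match/internal/pb"
	"google.golang.org/grpc"
)

func main() {
	// Illustrative address; point this at your frontendapi service.
	conn, err := grpc.Dial("om-frontendapi:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fe := pb.NewFrontendClient(conn)

	player := &pb.Player{
		Id:         "player-xid-example",                  // illustrative ID
		Properties: `{"mmr":1200,"region":{"us-east":1}}`, // illustrative indexed attributes
	}

	// Enter matchmaking.
	if _, err := fe.CreatePlayer(context.Background(), player); err != nil {
		log.Fatal(err)
	}

	// Stream updates until an assignment arrives, then stop (cancelling the
	// context closes the stream, so we stop generating load on Open Match).
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := fe.GetUpdates(ctx, player)
	if err != nil {
		log.Fatal(err)
	}
	for {
		update, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		if update.Assignment != "" {
			log.Printf("connect to DGS at %s", update.Assignment)
			return
		}
	}
}
```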

@ -0,0 +1,18 @@
syntax = 'proto3';
package api;
option go_package = "github.com/GoogleCloudPlatform/open-match/internal/pb";
// The protobuf messages sent in the gRPC calls are defined in 'messages.proto'.
import 'api/protobuf-spec/messages.proto';
// The MMF proto defines the API for running MMFs as long-lived, 'serving'
// functions inside of the kubernetes cluster.
service Function {
// The assumption is that there will be one service for each MMF that is
// being served. Build your MMF in the appropriate serving harness, deploy it
// to the K8s cluster with a unique service name, then connect to that service
// and call 'Run()' to execute the function.
rpc Run(messages.Arguments) returns (messages.Result) {}
}
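A minimal sketch of an MMF served through this interface, assuming the generated packages in `internal/pb`; the port is illustrative and the `Result` fields are an assumption.

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/GoogleCloudPlatform/open-match/internal/pb"
	"google.golang.org/grpc"
)

// mmf is a stub implementation of the Function service defined above.
type mmf struct{}

// Run is where a real MMF would read the profile and player pools from state
// storage (for example via the MMLogic API), choose players, and write a
// proposal back; here it only logs the request IDs it was handed.
func (s *mmf) Run(ctx context.Context, args *pb.Arguments) (*pb.Result, error) {
	log.Printf("MMF invoked: profile=%s proposal=%s request=%s",
		args.Request.ProfileId, args.Request.ProposalId, args.Request.RequestId)
	return &pb.Result{Success: true}, nil // assumes Result carries a success flag
}

func main() {
	lis, err := net.Listen("tcp", ":50502") // illustrative port
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	pb.RegisterFunctionServer(srv, &mmf{})
	log.Fatal(srv.Serve(lis))
}
```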

@ -14,11 +14,12 @@ option go_package = "github.com/GoogleCloudPlatform/open-match/internal/pb";
// MatchObject as input only require a few of them to be filled in. Check the
// gRPC function in question for more details.
message MatchObject{
string id = 1; // By convention, a UUID
string id = 1; // By convention, an Xid
string properties = 2; // By convention, a JSON-encoded string
string error = 3; // Last error encountered.
repeated Roster rosters = 4; // Rosters of players.
repeated PlayerPool pools = 5; // 'Hard' filters, and the players who match them.
string status = 6; // Resulting status of the match function
}
// Data structure to hold a list of players in a match.
@ -55,16 +56,26 @@ message PlayerPool{
Stats stats = 4; // Statistics for the last time this Pool was retrieved from state storage.
}
// Data structure to hold details about a player
// Open Match's internal representation and wire protocol format for "Players".
// In order to enter matchmaking using the Frontend API, your client code should generate
// a consistent Player (same result for each client every time they launch) with an ID and
// properties filled in (for more details about valid values for these fields,
// see the documentation).
// Players contain a number of fields, but the gRPC calls that take a
// Player as input only require a few of them to be filled in. Check the
// gRPC function in question for more details.
message Player{
message Attribute{
string name = 1; // Name should match a Filter.attribute field.
int64 value = 2;
}
string id = 1; // By convention, a UUID
string id = 1; // By convention, an Xid
string properties = 2; // By convention, a JSON-encoded string
string pool = 3; // Optionally used to specify the PlayerPool in which to find a player.
repeated Attribute attributes = 4; // Attributes of this player.
string assignment = 5; // By convention, ip:port of a DGS to connect to
string status = 6; // Arbitrary developer-chosen string.
string error = 7; // Arbitrary developer-chosen string.
}
@ -78,13 +89,27 @@ message Result{
message IlInput{
}
// Simple message used to pass the connection string for the DGS to the player.
// DEPRECATED: Likely to be integrated into another protobuf message in a future version.
message ConnectionInfo{
string connection_string = 1; // Passed by the matchmaker to game clients without modification.
}
message Assignments{
repeated Roster rosters = 1;
ConnectionInfo connection_info = 2;
string assignment = 10;
}
// Messages for gRPC-served matchmaking functions.
// The message for passing in the per-request identifiers
// to a matchmaking function; used so it knows which records to
// write/update in state storage.
message Request {
string profile_id = 1;
string proposal_id = 2;
string request_id = 3;
string error_id = 4;
string timestamp = 5;
}
// The message for passing all the necessary arguments to a
// matchmaking function.
message Arguments{
Request request = 1;
MatchObject matchobject = 2;
}

@ -1,3 +1,5 @@
#!/bin/bash
python3 -m grpc_tools.protoc -I . --python_out=. --grpc_python_out=. mmlogic.proto
python3 -m grpc_tools.protoc -I . --python_out=. --grpc_python_out=. messages.proto
cp *pb2* $OM/examples/functions/python3/simple/.

@ -19,6 +19,7 @@ cd $GOPATH/src
protoc \
${GOPATH}/src/github.com/GoogleCloudPlatform/open-match/api/protobuf-spec/backend.proto \
${GOPATH}/src/github.com/GoogleCloudPlatform/open-match/api/protobuf-spec/frontend.proto \
${GOPATH}/src/github.com/GoogleCloudPlatform/open-match/api/protobuf-spec/function.proto \
${GOPATH}/src/github.com/GoogleCloudPlatform/open-match/api/protobuf-spec/mmlogic.proto \
${GOPATH}/src/github.com/GoogleCloudPlatform/open-match/api/protobuf-spec/messages.proto \
-I ${GOPATH}/src/github.com/GoogleCloudPlatform/open-match/ \

cloudbuild.yaml Normal file

@ -0,0 +1,295 @@
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Open Match Script for Google Cloud Build #
################################################################################
# To run this locally:
# cloud-build-local --config=cloudbuild.yaml --dryrun=false --substitutions=_OM_VERSION=DEV .
# To run this remotely:
# gcloud builds submit --config=cloudbuild.yaml --substitutions=_OM_VERSION=DEV .
# Requires gcloud to be installed to work. (https://cloud.google.com/sdk/)
# gcloud auth login
# gcloud components install cloud-build-local
# This YAML contains all the build steps for building Open Match.
# All PRs are verified against this script to prevent build breakages and regressions.
# Conventions
# Each build step is ID'ed with "Prefix: Description".
# The prefix portion determines what kind of step it is and its impact.
# Docker Image: Read-Only, outputs a docker image.
# Lint: Read-Only, verifies correctness and formatting of a file.
# Build: Read-Write, outputs a build artifact. Ok to run in parallel if the artifact will not collide with another one.
# Generate: Read-Write, outputs files within /workspace that are used in other build steps. Do not run these in parallel.
# Setup: Read-Write, similar to generate but steps that run before any other step.
# Some useful things to know about Cloud Build.
# The root of this repository is always stored in /workspace.
# Any modifications that occur within /workspace are persisted between build steps; anything else is forgotten.
# If a build step has intermediate files that need to be persisted for a future step, then use volumes.
# An example of this is the go-vol which is where the pkg/ data for go mod is stored.
# More information here: https://cloud.google.com/cloud-build/docs/build-config#build_steps
# A build step is basically a docker image that is tuned for Cloud Build,
# https://github.com/GoogleCloudPlatform/cloud-builders/tree/master/go
steps:
- id: 'Docker Image: open-match-base-build'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'open-match-base-build', '-f', 'Dockerfile.base-build', '.']
waitFor: ['-']
- id: 'Docker Image: backendapi'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-backendapi:${_OM_VERSION}-${SHORT_SHA}', 'cmd/backendapi']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: frontendapi'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-frontendapi:${_OM_VERSION}-${SHORT_SHA}', 'cmd/frontendapi']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: mmforc'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-mmforc:${_OM_VERSION}-${SHORT_SHA}', 'cmd/mmforc']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: mmlogicapi'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-mmlogicapi:${_OM_VERSION}-${SHORT_SHA}', 'cmd/mmlogicapi']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: Evaluator Simple'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-evaluator-simple:${_OM_VERSION}-${SHORT_SHA}', 'examples/evaluators/golang/simple']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: openmatch-mmf-cs-mmlogic-simple'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-mmf-cs-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}', '.']
dir: 'examples/functions/csharp/simple'
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: openmatch-mmf-go-mmlogic-simple'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-mmf-go-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}', '-f', 'examples/functions/golang/manual-simple/Dockerfile', '.']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: openmatch-mmf-php-mmlogic-simple'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-mmf-php-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}', '-f', 'examples/functions/php/mmlogic-simple/Dockerfile', '.']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: openmatch-mmf-py3-mmlogic-simple'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-mmf-py3-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}', '-f', 'examples/functions/python3/mmlogic-simple/Dockerfile', '.']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: backendclient'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-backendclient:${_OM_VERSION}-${SHORT_SHA}', 'examples/backendclient']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: clientloadgen'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-clientloadgen:${_OM_VERSION}-${SHORT_SHA}', 'test/cmd/clientloadgen']
waitFor: ['Docker Image: open-match-base-build']
- id: 'Docker Image: frontendclient'
name: gcr.io/cloud-builders/docker
args: ['build', '-t', 'gcr.io/$PROJECT_ID/openmatch-frontendclient:${_OM_VERSION}-${SHORT_SHA}', 'test/cmd/frontendclient']
waitFor: ['Docker Image: open-match-base-build']
# Cannot enable, produces lots of errors but can be useful.
#- id: 'Lint: YAML Files'
# name: wpengine/yamllint
# args: ['/workspace']
# waitFor: ['-']
- id: 'Lint: Kubernetes Configs'
name: garethr/kubeval
args: ['install/yaml/01-redis.yaml', 'install/yaml/02-open-match.yaml']
waitFor: ['-']
- id: 'Setup: Pull Dependencies'
name: golang
env: ['GO111MODULE=on', 'CGO_ENABLED=0']
args: ['go', 'mod', 'download']
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['-']
- id: 'Lint: Formatting'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'fmt', './...']
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Lint: Vetting'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'vet', './...']
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Test: 10x with Race Detection and Coverage'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off']
args: ['go', 'test', './...', '-race', '-test.count', '10', '-cover']
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: All Binaries'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', './...']
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: cmd/backendapi/backendapi'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'cmd/backendapi/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: cmd/frontendapi/frontendapi'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'cmd/frontendapi/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: cmd/mmforc/mmforc'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'cmd/mmforc/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: cmd/mmlogicapi/mmlogicapi'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'cmd/mmlogicapi/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: examples/functions/golang/manual-simple'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'examples/functions/golang/manual-simple/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: examples/backendclient'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'examples/backendclient/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: test/cmd/clientloadgen'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'test/cmd/clientloadgen/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
- id: 'Build: test/cmd/frontendclient'
name: golang
env: ['GO111MODULE=on', 'GOPROXY=off', 'CGO_ENABLED=0']
args: ['go', 'build', '-a', '-installsuffix', 'cgo', '.']
dir: 'test/cmd/frontendclient/'
volumes:
- name: 'go-vol'
path: '/go'
waitFor: ['Setup: Pull Dependencies']
artifacts:
objects:
location: gs://open-match-build-artifacts/output/
paths:
- cmd/backendapi/backendapi
- cmd/frontendapi/frontendapi
- cmd/mmforc/mmforc
- cmd/mmlogicapi/mmlogicapi
- examples/functions/golang/manual-simple/manual-simple
- examples/backendclient/backendclient
- test/cmd/clientloadgen/clientloadgen
- test/cmd/frontendclient/frontendclient
images:
- 'gcr.io/$PROJECT_ID/openmatch-backendapi:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-frontendapi:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-mmforc:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-mmlogicapi:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-evaluator-simple:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-mmf-cs-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-mmf-go-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-mmf-php-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-mmf-py3-mmlogic-simple:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-backendclient:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-clientloadgen:${_OM_VERSION}-${SHORT_SHA}'
- 'gcr.io/$PROJECT_ID/openmatch-frontendclient:${_OM_VERSION}-${SHORT_SHA}'
substitutions:
_OM_VERSION: 0.4.0
logsBucket: 'gs://open-match-build-logs/'
options:
sourceProvenanceHash: ['SHA256']
machineType: 'N1_HIGHCPU_8'
# TODO: The build is slow because we don't vendor. go get takes a very long time.
# Also we are rebuilding a lot of code unnecessarily. This should improve once
# we have new hermetic and reproducible Dockerfiles.
timeout: 1200s
# TODO Build Steps
# api/protobuf-spec/*: Build Protocol Buffers (golang, python, php)
# config/matchmaker_config.yaml: Lint this file so it's verified as a valid YAML file.
# deployments/k8s: Verify with kubelint.
# examples/profiles/*.json: Verify valid JSON files.
#
# Consolidate many of these build steps via Makefile.
# Caching of dependencies is a serious problem. Cloud Build does not complete within 20 minutes!

@ -1,9 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'--tag=gcr.io/$PROJECT_ID/openmatch-backendapi:dev',
'-f', 'Dockerfile.backendapi',
'.'
]
images: ['gcr.io/$PROJECT_ID/openmatch-backendapi:dev']

@ -1,10 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'--tag=gcr.io/$PROJECT_ID/openmatch-devbase:latest',
'--cache-from=gcr.io/$PROJECT_ID/openmatch-devbase:latest',
'-f', 'Dockerfile.base',
'.'
]
images: ['gcr.io/$PROJECT_ID/openmatch-devbase:latest']

@ -1,9 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'--tag=gcr.io/$PROJECT_ID/openmatch-frontendapi:dev',
'-f', 'Dockerfile.frontendapi',
'.'
]
images: ['gcr.io/$PROJECT_ID/openmatch-frontendapi:dev']

@ -1,11 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'pull', 'gcr.io/$PROJECT_ID/openmatch-devbase:latest' ]
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'--tag=gcr.io/$PROJECT_ID/openmatch-mmf:go',
'-f', 'Dockerfile.mmf_go',
'.'
]
images: ['gcr.io/$PROJECT_ID/openmatch-mmf:go']


@ -1,9 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [
    'build',
    '--tag=gcr.io/$PROJECT_ID/openmatch-mmf:php',
    '-f', 'Dockerfile.mmf_php',
    '.'
  ]
images: ['gcr.io/$PROJECT_ID/openmatch-mmf:php']


@ -1,9 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [
    'build',
    '--tag=gcr.io/$PROJECT_ID/openmatch-mmf:py3',
    '-f', 'Dockerfile.mmf_py3',
    '.'
  ]
images: ['gcr.io/$PROJECT_ID/openmatch-mmf:py3']


@ -1,9 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [
    'build',
    '--tag=gcr.io/$PROJECT_ID/openmatch-mmforc:dev',
    '-f', 'Dockerfile.mmforc',
    '.'
  ]
images: ['gcr.io/$PROJECT_ID/openmatch-mmforc:dev']


@ -1,9 +0,0 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [
    'build',
    '--tag=gcr.io/$PROJECT_ID/openmatch-mmlogicapi:dev',
    '-f', 'Dockerfile.mmlogicapi',
    '.'
  ]
images: ['gcr.io/$PROJECT_ID/openmatch-mmlogicapi:dev']


@ -0,0 +1,9 @@
FROM open-match-base-build as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/backendapi/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
FROM gcr.io/distroless/static
COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/backendapi/backendapi .
ENTRYPOINT ["/backendapi"]


@ -0,0 +1,8 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [
    'build',
    '--tag=gcr.io/$PROJECT_ID/openmatch-backendapi:0.4',
    '.'
  ]
images: ['gcr.io/$PROJECT_ID/openmatch-backendapi:0.4']


@ -1,6 +1,7 @@
/*
This application handles all the startup and connection scaffolding for
running a gRPC server serving the APIService as defined in proto/backend.proto
running a gRPC server serving the APIService as defined in
${OM_ROOT}/internal/pb/backend.pb.go
All the actual important bits are in the API Server source code: apisrv/apisrv.go
@ -22,84 +23,9 @@ limitations under the License.
package main
import (
"errors"
"os"
"os/signal"
"github.com/GoogleCloudPlatform/open-match/cmd/backendapi/apisrv"
"github.com/GoogleCloudPlatform/open-match/config"
"github.com/GoogleCloudPlatform/open-match/internal/metrics"
redishelpers "github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"go.opencensus.io/plugin/ocgrpc"
"github.com/GoogleCloudPlatform/open-match/internal/app/backendapi"
)
var (
// Logrus structured logging setup
beLogFields = log.Fields{
"app": "openmatch",
"component": "backend",
"caller": "backendapi/main.go",
}
beLog = log.WithFields(beLogFields)
// Viper config management setup
cfg = viper.New()
err = errors.New("")
)
func init() {
// Logrus structured logging initialization
// Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus
log.AddHook(metrics.NewHook(apisrv.BeLogLines, apisrv.KeySeverity))
// Viper config management initialization
cfg, err = config.Read()
if err != nil {
beLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Unable to load config file")
}
if cfg.GetBool("debug") == true {
log.SetLevel(log.DebugLevel) // debug only, verbose - turn off in production!
beLog.Warn("Debug logging configured. Not recommended for production!")
}
// Configure OpenCensus exporter to Prometheus
// metrics.ConfigureOpenCensusPrometheusExporter expects that every OpenCensus view you
// want to register is in an array, so append any views you want from other
// packages to a single array here.
ocServerViews := apisrv.DefaultBackendAPIViews // BackendAPI OpenCensus views.
ocServerViews = append(ocServerViews, ocgrpc.DefaultServerViews...) // gRPC OpenCensus views.
ocServerViews = append(ocServerViews, config.CfgVarCountView) // config loader view.
// Waiting on https://github.com/opencensus-integrations/redigo/pull/1
// ocServerViews = append(ocServerViews, redis.ObservabilityMetricViews...) // redis OpenCensus views.
beLog.WithFields(log.Fields{"viewscount": len(ocServerViews)}).Info("Loaded OpenCensus views")
metrics.ConfigureOpenCensusPrometheusExporter(cfg, ocServerViews)
}
func main() {
// Connect to redis
pool := redishelpers.ConnectionPool(cfg)
defer pool.Close()
// Instantiate the gRPC server with the connections we've made
beLog.WithFields(log.Fields{"testfield": "test"}).Info("Attempting to start gRPC server")
srv := apisrv.New(cfg, pool)
// Run the gRPC server
err := srv.Open()
if err != nil {
beLog.WithFields(log.Fields{"error": err.Error()}).Fatal("Failed to start gRPC server")
}
// Exit when we see a signal
terminate := make(chan os.Signal, 1)
signal.Notify(terminate, os.Interrupt)
<-terminate
beLog.Info("Shutting down gRPC server")
backendapi.RunApplication()
}


@ -1 +0,0 @@
../../config/matchmaker_config.json


@ -0,0 +1 @@
../../config/matchmaker_config.yaml


@ -1,749 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend.proto
/*
Package backend is a generated protocol buffer package.
It is generated from these files:
backend.proto
It has these top-level messages:
Profile
MatchObject
Roster
Filter
Stats
PlayerPool
Player
Result
IlInput
Timestamp
ConnectionInfo
Assignments
*/
package backend
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Profile struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Properties string `protobuf:"bytes,2,opt,name=properties" json:"properties,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
// When you send a Profile to the backendAPI, it looks to see if you populated
// this field with protobuf-encoded PlayerPool objects containing valid Filter
// objects. If you did, they are used by OM. If you didn't, the backendAPI
// next looks in your properties blob at the key specified in the 'jsonkeys.pools'
// config value from config/matchmaker_config.json - if it finds valid player
// pool definitions at that key, it will try to unmarshal them into this field.
// If you didn't specify valid player pools in either place, OM assumes you
// know what you're doing and just leaves this unpopulated.
Pools []*PlayerPool `protobuf:"bytes,4,rep,name=pools" json:"pools,omitempty"`
}
func (m *Profile) Reset() { *m = Profile{} }
func (m *Profile) String() string { return proto.CompactTextString(m) }
func (*Profile) ProtoMessage() {}
func (*Profile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Profile) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Profile) GetProperties() string {
if m != nil {
return m.Properties
}
return ""
}
func (m *Profile) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Profile) GetPools() []*PlayerPool {
if m != nil {
return m.Pools
}
return nil
}
// An MMF takes the Profile object above and generates a MatchObject.
type MatchObject struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Properties string `protobuf:"bytes,2,opt,name=properties" json:"properties,omitempty"`
Rosters []*Roster `protobuf:"bytes,3,rep,name=rosters" json:"rosters,omitempty"`
Pools []*PlayerPool `protobuf:"bytes,4,rep,name=pools" json:"pools,omitempty"`
}
func (m *MatchObject) Reset() { *m = MatchObject{} }
func (m *MatchObject) String() string { return proto.CompactTextString(m) }
func (*MatchObject) ProtoMessage() {}
func (*MatchObject) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *MatchObject) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *MatchObject) GetProperties() string {
if m != nil {
return m.Properties
}
return ""
}
func (m *MatchObject) GetRosters() []*Roster {
if m != nil {
return m.Rosters
}
return nil
}
func (m *MatchObject) GetPools() []*PlayerPool {
if m != nil {
return m.Pools
}
return nil
}
// Data structure to hold a list of players in a match.
type Roster struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Players []*Player `protobuf:"bytes,2,rep,name=players" json:"players,omitempty"`
}
func (m *Roster) Reset() { *m = Roster{} }
func (m *Roster) String() string { return proto.CompactTextString(m) }
func (*Roster) ProtoMessage() {}
func (*Roster) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Roster) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Roster) GetPlayers() []*Player {
if m != nil {
return m.Players
}
return nil
}
// A filter to apply to the player pool.
type Filter struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Attribute string `protobuf:"bytes,2,opt,name=attribute" json:"attribute,omitempty"`
Maxv int64 `protobuf:"varint,3,opt,name=maxv" json:"maxv,omitempty"`
Minv int64 `protobuf:"varint,4,opt,name=minv" json:"minv,omitempty"`
Stats *Stats `protobuf:"bytes,5,opt,name=stats" json:"stats,omitempty"`
}
func (m *Filter) Reset() { *m = Filter{} }
func (m *Filter) String() string { return proto.CompactTextString(m) }
func (*Filter) ProtoMessage() {}
func (*Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Filter) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Filter) GetAttribute() string {
if m != nil {
return m.Attribute
}
return ""
}
func (m *Filter) GetMaxv() int64 {
if m != nil {
return m.Maxv
}
return 0
}
func (m *Filter) GetMinv() int64 {
if m != nil {
return m.Minv
}
return 0
}
func (m *Filter) GetStats() *Stats {
if m != nil {
return m.Stats
}
return nil
}
type Stats struct {
Count int64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
Elapsed float64 `protobuf:"fixed64,2,opt,name=elapsed" json:"elapsed,omitempty"`
}
func (m *Stats) Reset() { *m = Stats{} }
func (m *Stats) String() string { return proto.CompactTextString(m) }
func (*Stats) ProtoMessage() {}
func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Stats) GetCount() int64 {
if m != nil {
return m.Count
}
return 0
}
func (m *Stats) GetElapsed() float64 {
if m != nil {
return m.Elapsed
}
return 0
}
type PlayerPool struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Filters []*Filter `protobuf:"bytes,2,rep,name=filters" json:"filters,omitempty"`
Roster *Roster `protobuf:"bytes,3,opt,name=roster" json:"roster,omitempty"`
Stats *Stats `protobuf:"bytes,4,opt,name=stats" json:"stats,omitempty"`
}
func (m *PlayerPool) Reset() { *m = PlayerPool{} }
func (m *PlayerPool) String() string { return proto.CompactTextString(m) }
func (*PlayerPool) ProtoMessage() {}
func (*PlayerPool) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *PlayerPool) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *PlayerPool) GetFilters() []*Filter {
if m != nil {
return m.Filters
}
return nil
}
func (m *PlayerPool) GetRoster() *Roster {
if m != nil {
return m.Roster
}
return nil
}
func (m *PlayerPool) GetStats() *Stats {
if m != nil {
return m.Stats
}
return nil
}
// Data structure for a profile to pass to the matchmaking function.
type Player struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Properties string `protobuf:"bytes,2,opt,name=properties" json:"properties,omitempty"`
Pool string `protobuf:"bytes,3,opt,name=pool" json:"pool,omitempty"`
Attributes []*Player_Attribute `protobuf:"bytes,4,rep,name=attributes" json:"attributes,omitempty"`
}
func (m *Player) Reset() { *m = Player{} }
func (m *Player) String() string { return proto.CompactTextString(m) }
func (*Player) ProtoMessage() {}
func (*Player) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *Player) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Player) GetProperties() string {
if m != nil {
return m.Properties
}
return ""
}
func (m *Player) GetPool() string {
if m != nil {
return m.Pool
}
return ""
}
func (m *Player) GetAttributes() []*Player_Attribute {
if m != nil {
return m.Attributes
}
return nil
}
type Player_Attribute struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value int64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"`
}
func (m *Player_Attribute) Reset() { *m = Player_Attribute{} }
func (m *Player_Attribute) String() string { return proto.CompactTextString(m) }
func (*Player_Attribute) ProtoMessage() {}
func (*Player_Attribute) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} }
func (m *Player_Attribute) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Player_Attribute) GetValue() int64 {
if m != nil {
return m.Value
}
return 0
}
// Simple message to return success/failure and error status.
type Result struct {
Success bool `protobuf:"varint,1,opt,name=success" json:"success,omitempty"`
Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
}
func (m *Result) Reset() { *m = Result{} }
func (m *Result) String() string { return proto.CompactTextString(m) }
func (*Result) ProtoMessage() {}
func (*Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Result) GetSuccess() bool {
if m != nil {
return m.Success
}
return false
}
func (m *Result) GetError() string {
if m != nil {
return m.Error
}
return ""
}
// IlInput is an empty message reserved for future use.
type IlInput struct {
}
func (m *IlInput) Reset() { *m = IlInput{} }
func (m *IlInput) String() string { return proto.CompactTextString(m) }
func (*IlInput) ProtoMessage() {}
func (*IlInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
// Epoch timestamp in seconds.
type Timestamp struct {
Ts int64 `protobuf:"varint,1,opt,name=ts" json:"ts,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *Timestamp) GetTs() int64 {
if m != nil {
return m.Ts
}
return 0
}
// Simple message used to pass the connection string for the DGS to the player.
type ConnectionInfo struct {
ConnectionString string `protobuf:"bytes,1,opt,name=connection_string,json=connectionString" json:"connection_string,omitempty"`
}
func (m *ConnectionInfo) Reset() { *m = ConnectionInfo{} }
func (m *ConnectionInfo) String() string { return proto.CompactTextString(m) }
func (*ConnectionInfo) ProtoMessage() {}
func (*ConnectionInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *ConnectionInfo) GetConnectionString() string {
if m != nil {
return m.ConnectionString
}
return ""
}
type Assignments struct {
Rosters []*Roster `protobuf:"bytes,1,rep,name=rosters" json:"rosters,omitempty"`
ConnectionInfo *ConnectionInfo `protobuf:"bytes,2,opt,name=connection_info,json=connectionInfo" json:"connection_info,omitempty"`
}
func (m *Assignments) Reset() { *m = Assignments{} }
func (m *Assignments) String() string { return proto.CompactTextString(m) }
func (*Assignments) ProtoMessage() {}
func (*Assignments) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (m *Assignments) GetRosters() []*Roster {
if m != nil {
return m.Rosters
}
return nil
}
func (m *Assignments) GetConnectionInfo() *ConnectionInfo {
if m != nil {
return m.ConnectionInfo
}
return nil
}
func init() {
proto.RegisterType((*Profile)(nil), "Profile")
proto.RegisterType((*MatchObject)(nil), "MatchObject")
proto.RegisterType((*Roster)(nil), "Roster")
proto.RegisterType((*Filter)(nil), "Filter")
proto.RegisterType((*Stats)(nil), "Stats")
proto.RegisterType((*PlayerPool)(nil), "PlayerPool")
proto.RegisterType((*Player)(nil), "Player")
proto.RegisterType((*Player_Attribute)(nil), "Player.Attribute")
proto.RegisterType((*Result)(nil), "Result")
proto.RegisterType((*IlInput)(nil), "IlInput")
proto.RegisterType((*Timestamp)(nil), "Timestamp")
proto.RegisterType((*ConnectionInfo)(nil), "ConnectionInfo")
proto.RegisterType((*Assignments)(nil), "Assignments")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for API service
type APIClient interface {
// Calls to ask the matchmaker to run a matchmaking function.
//
// Run MMF once. Return a matchobject that fits this profile.
CreateMatch(ctx context.Context, in *Profile, opts ...grpc.CallOption) (*MatchObject, error)
// Continually run MMF and stream matchobjects that fit this profile until
// client closes the connection.
ListMatches(ctx context.Context, in *Profile, opts ...grpc.CallOption) (API_ListMatchesClient, error)
// Delete a matchobject from state storage manually. (Matchobjects in state
// storage will also automatically expire after a while)
DeleteMatch(ctx context.Context, in *MatchObject, opts ...grpc.CallOption) (*Result, error)
// Call for communication of connection info to players.
//
// Write the connection info for the list of players in the
// Assignments.Rosters to state storage. The FrontendAPI is responsible for
// sending anything written here to the game clients.
// TODO: change this to be agnostic; return a 'result' instead of a connection
// string so it can be integrated with session service etc
CreateAssignments(ctx context.Context, in *Assignments, opts ...grpc.CallOption) (*Result, error)
// Remove DGS connection info from state storage for all players in the Roster.
DeleteAssignments(ctx context.Context, in *Roster, opts ...grpc.CallOption) (*Result, error)
}
type aPIClient struct {
cc *grpc.ClientConn
}
func NewAPIClient(cc *grpc.ClientConn) APIClient {
return &aPIClient{cc}
}
func (c *aPIClient) CreateMatch(ctx context.Context, in *Profile, opts ...grpc.CallOption) (*MatchObject, error) {
out := new(MatchObject)
err := grpc.Invoke(ctx, "/API/CreateMatch", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) ListMatches(ctx context.Context, in *Profile, opts ...grpc.CallOption) (API_ListMatchesClient, error) {
stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/API/ListMatches", opts...)
if err != nil {
return nil, err
}
x := &aPIListMatchesClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type API_ListMatchesClient interface {
Recv() (*MatchObject, error)
grpc.ClientStream
}
type aPIListMatchesClient struct {
grpc.ClientStream
}
func (x *aPIListMatchesClient) Recv() (*MatchObject, error) {
m := new(MatchObject)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *aPIClient) DeleteMatch(ctx context.Context, in *MatchObject, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/API/DeleteMatch", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) CreateAssignments(ctx context.Context, in *Assignments, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/API/CreateAssignments", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) DeleteAssignments(ctx context.Context, in *Roster, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/API/DeleteAssignments", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for API service
type APIServer interface {
// Calls to ask the matchmaker to run a matchmaking function.
//
// Run MMF once. Return a matchobject that fits this profile.
CreateMatch(context.Context, *Profile) (*MatchObject, error)
// Continually run MMF and stream matchobjects that fit this profile until
// client closes the connection.
ListMatches(*Profile, API_ListMatchesServer) error
// Delete a matchobject from state storage manually. (Matchobjects in state
// storage will also automatically expire after a while)
DeleteMatch(context.Context, *MatchObject) (*Result, error)
// Call for communication of connection info to players.
//
// Write the connection info for the list of players in the
// Assignments.Rosters to state storage. The FrontendAPI is responsible for
// sending anything written here to the game clients.
// TODO: change this to be agnostic; return a 'result' instead of a connection
// string so it can be integrated with session service etc
CreateAssignments(context.Context, *Assignments) (*Result, error)
// Remove DGS connection info from state storage for all players in the Roster.
DeleteAssignments(context.Context, *Roster) (*Result, error)
}
func RegisterAPIServer(s *grpc.Server, srv APIServer) {
s.RegisterService(&_API_serviceDesc, srv)
}
func _API_CreateMatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Profile)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).CreateMatch(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/CreateMatch",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).CreateMatch(ctx, req.(*Profile))
}
return interceptor(ctx, in, info, handler)
}
func _API_ListMatches_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(Profile)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(APIServer).ListMatches(m, &aPIListMatchesServer{stream})
}
type API_ListMatchesServer interface {
Send(*MatchObject) error
grpc.ServerStream
}
type aPIListMatchesServer struct {
grpc.ServerStream
}
func (x *aPIListMatchesServer) Send(m *MatchObject) error {
return x.ServerStream.SendMsg(m)
}
func _API_DeleteMatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MatchObject)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).DeleteMatch(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/DeleteMatch",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).DeleteMatch(ctx, req.(*MatchObject))
}
return interceptor(ctx, in, info, handler)
}
func _API_CreateAssignments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Assignments)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).CreateAssignments(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/CreateAssignments",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).CreateAssignments(ctx, req.(*Assignments))
}
return interceptor(ctx, in, info, handler)
}
func _API_DeleteAssignments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Roster)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).DeleteAssignments(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/DeleteAssignments",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).DeleteAssignments(ctx, req.(*Roster))
}
return interceptor(ctx, in, info, handler)
}
var _API_serviceDesc = grpc.ServiceDesc{
ServiceName: "API",
HandlerType: (*APIServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateMatch",
Handler: _API_CreateMatch_Handler,
},
{
MethodName: "DeleteMatch",
Handler: _API_DeleteMatch_Handler,
},
{
MethodName: "CreateAssignments",
Handler: _API_CreateAssignments_Handler,
},
{
MethodName: "DeleteAssignments",
Handler: _API_DeleteAssignments_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ListMatches",
Handler: _API_ListMatches_Handler,
ServerStreams: true,
},
},
Metadata: "backend.proto",
}
func init() { proto.RegisterFile("backend.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 591 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x51, 0x6f, 0xd3, 0x30,
0x10, 0x9e, 0x9b, 0x26, 0x59, 0x2f, 0x63, 0xa3, 0xd6, 0x1e, 0xa2, 0x31, 0x41, 0xe7, 0x07, 0x56,
0x04, 0x8a, 0xa0, 0x08, 0xb1, 0x17, 0x84, 0xaa, 0x21, 0xa4, 0x4a, 0x20, 0x2a, 0x8f, 0x77, 0x94,
0xa6, 0xee, 0xf0, 0x48, 0xed, 0xc8, 0x76, 0x2a, 0x78, 0x43, 0xf0, 0x9f, 0xf8, 0x2d, 0xfc, 0x1c,
0x14, 0x3b, 0x69, 0x53, 0x41, 0x25, 0xe0, 0xcd, 0xdf, 0xe7, 0xbb, 0xf3, 0x77, 0xdf, 0xe5, 0x02,
0xb7, 0x66, 0x69, 0xf6, 0x89, 0x89, 0x79, 0x52, 0x28, 0x69, 0x24, 0x29, 0x20, 0x9c, 0x2a, 0xb9,
0xe0, 0x39, 0xc3, 0x87, 0xd0, 0xe1, 0xf3, 0x18, 0x0d, 0xd0, 0xb0, 0x47, 0x3b, 0x7c, 0x8e, 0xef,
0x02, 0x14, 0x4a, 0x16, 0x4c, 0x19, 0xce, 0x74, 0xdc, 0xb1, 0x7c, 0x8b, 0xc1, 0x18, 0xba, 0x22,
0x5d, 0xb2, 0xd8, 0xb3, 0x37, 0xf6, 0x8c, 0xcf, 0xc0, 0x2f, 0xa4, 0xcc, 0x75, 0xdc, 0x1d, 0x78,
0xc3, 0x68, 0x14, 0x25, 0xd3, 0x3c, 0xfd, 0xc2, 0xd4, 0x54, 0xca, 0x9c, 0xba, 0x1b, 0xf2, 0x1d,
0x41, 0xf4, 0x36, 0x35, 0xd9, 0xc7, 0x77, 0xb3, 0x1b, 0x96, 0x99, 0x7f, 0x7e, 0xf6, 0x0c, 0x42,
0x25, 0xb5, 0x61, 0x4a, 0xc7, 0x9e, 0x7d, 0x24, 0x4c, 0xa8, 0xc5, 0xb4, 0xe1, 0xff, 0x46, 0xc5,
0x4b, 0x08, 0x5c, 0xd6, 0xba, 0x0d, 0xb4, 0xd5, 0x46, 0x58, 0xd8, 0x94, 0x4a, 0x80, 0x7b, 0xc3,
0x95, 0xa0, 0x0d, 0x4f, 0xbe, 0x22, 0x08, 0x5e, 0xf3, 0x7c, 0x57, 0x85, 0x53, 0xe8, 0xa5, 0xc6,
0x28, 0x3e, 0x2b, 0x0d, 0xab, 0x9b, 0xd8, 0x10, 0x55, 0xc6, 0x32, 0xfd, 0xbc, 0xb2, 0xd6, 0x79,
0xd4, 0x9e, 0x2d, 0xc7, 0xc5, 0x2a, 0xee, 0xd6, 0x1c, 0x17, 0x2b, 0x7c, 0x0a, 0xbe, 0x36, 0xa9,
0xd1, 0xb1, 0x3f, 0x40, 0xc3, 0x68, 0x14, 0x24, 0x57, 0x15, 0xa2, 0x8e, 0x24, 0xcf, 0xc1, 0xb7,
0x18, 0x1f, 0x83, 0x9f, 0xc9, 0x52, 0x18, 0xab, 0xc0, 0xa3, 0x0e, 0xe0, 0x18, 0x42, 0x96, 0xa7,
0x85, 0x66, 0x73, 0x2b, 0x00, 0xd1, 0x06, 0x92, 0x6f, 0x08, 0x60, 0x63, 0xc9, 0x2e, 0x07, 0x16,
0xb6, 0xbb, 0x8d, 0x03, 0xae, 0x5b, 0xda, 0xf0, 0xf8, 0x1e, 0x04, 0xce, 0x70, 0xdb, 0x46, 0x6b,
0x0e, 0x35, 0xbd, 0x51, 0xdf, 0xfd, 0x93, 0xfa, 0x1f, 0x08, 0x02, 0x27, 0xe2, 0x7f, 0xbe, 0xbc,
0x6a, 0x8a, 0xcd, 0x97, 0x57, 0x9d, 0xf1, 0x13, 0x80, 0xb5, 0xbf, 0xcd, 0xe0, 0xfb, 0xf5, 0xd4,
0x92, 0x71, 0x73, 0x43, 0x5b, 0x41, 0x27, 0xcf, 0xa0, 0x37, 0x6e, 0x8f, 0xe4, 0x37, 0x13, 0x8e,
0xc1, 0x5f, 0xa5, 0x79, 0xe9, 0x06, 0xe8, 0x51, 0x07, 0xc8, 0x05, 0x04, 0x94, 0xe9, 0x32, 0xb7,
0x0e, 0xeb, 0x32, 0xcb, 0x98, 0xd6, 0x36, 0x6d, 0x9f, 0x36, 0xb0, 0xca, 0x64, 0x4a, 0x49, 0x55,
0x8b, 0x77, 0x80, 0xf4, 0x20, 0x9c, 0xe4, 0x13, 0x51, 0x94, 0x86, 0xdc, 0x81, 0xde, 0x7b, 0xbe,
0x64, 0xda, 0xa4, 0xcb, 0xa2, 0xea, 0xdf, 0xe8, 0x7a, 0x78, 0x1d, 0xa3, 0xc9, 0x0b, 0x38, 0xbc,
0x94, 0x42, 0xb0, 0xcc, 0x70, 0x29, 0x26, 0x62, 0x21, 0xf1, 0x43, 0xe8, 0x67, 0x6b, 0xe6, 0x83,
0x36, 0x8a, 0x8b, 0xeb, 0x5a, 0xea, 0xed, 0xcd, 0xc5, 0x95, 0xe5, 0xc9, 0x0d, 0x44, 0x63, 0xad,
0xf9, 0xb5, 0x58, 0x32, 0x61, 0xb6, 0x16, 0x06, 0xed, 0x58, 0x98, 0x0b, 0x38, 0x6a, 0x95, 0xe7,
0x62, 0x21, 0xad, 0xf0, 0x68, 0x74, 0x94, 0x6c, 0x0b, 0xa1, 0x87, 0xd9, 0x16, 0x1e, 0xfd, 0x44,
0xe0, 0x8d, 0xa7, 0x13, 0x7c, 0x0e, 0xd1, 0xa5, 0x62, 0xa9, 0x61, 0x76, 0xb5, 0xf1, 0x7e, 0x52,
0xff, 0x55, 0x4e, 0x0e, 0x92, 0xd6, 0xb2, 0x93, 0x3d, 0xfc, 0x00, 0xa2, 0x37, 0x5c, 0x1b, 0x4b,
0x32, 0xbd, 0x3b, 0xf0, 0x31, 0xc2, 0xf7, 0x21, 0x7a, 0xc5, 0x72, 0xd6, 0xd4, 0xdc, 0x0a, 0x38,
0x09, 0x13, 0x37, 0x04, 0xb2, 0x87, 0x1f, 0x41, 0xdf, 0xbd, 0xdd, 0xee, 0xfa, 0x20, 0x69, 0xa1,
0x76, 0xf4, 0x39, 0xf4, 0x5d, 0xd5, 0x76, 0x74, 0x63, 0x49, 0x2b, 0x70, 0x16, 0xd8, 0x3f, 0xe4,
0xd3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x04, 0x1a, 0xf8, 0x0a, 0x32, 0x05, 0x00, 0x00,
}
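
To make the Pools fallback described in the Profile comment above concrete, here is a minimal client sketch. It is not part of this change: the import path follows the pre-move cmd/ layout this diff is replacing, and the dial target and attribute names are assumptions.

package main

import (
	"context"
	"log"

	backend "github.com/GoogleCloudPlatform/open-match/cmd/backendapi/proto"
	"google.golang.org/grpc"
)

func main() {
	// Assumed address; use whatever host:port your backendapi serves on.
	conn, err := grpc.Dial("om-backendapi:50505", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := backend.NewAPIClient(conn)

	// Populating Pools directly means the backendAPI never has to fall
	// back to the 'jsonkeys.pools' key in the properties blob.
	profile := &backend.Profile{
		Id:   "example-profile",
		Name: "simple-1v1",
		Pools: []*backend.PlayerPool{{
			Name: "everyone",
			Filters: []*backend.Filter{{
				Name:      "rating",
				Attribute: "mmr.rating",
				Minv:      0,
				Maxv:      3000,
			}},
		}},
	}

	match, err := client.CreateMatch(context.Background(), profile)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("match %s has %d roster(s)", match.Id, len(match.Rosters))
}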


@ -1,4 +0,0 @@
/*
backend is a package compiled from the protocol buffer definition in <REPO_ROOT>/api/protobuf-spec/backend.proto. It is auto-generated and shouldn't be edited.
*/
package backend


@ -0,0 +1,9 @@
FROM open-match-base-build as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/frontendapi/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
FROM gcr.io/distroless/static
COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/frontendapi/frontendapi .
ENTRYPOINT ["/frontendapi"]


@ -1,300 +0,0 @@
/*
package apisrv provides an implementation of the gRPC server defined in ../../../api/protobuf-spec/frontend.proto.
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apisrv
import (
"context"
"errors"
"net"
"time"
frontend "github.com/GoogleCloudPlatform/open-match/cmd/frontendapi/proto"
"github.com/GoogleCloudPlatform/open-match/internal/metrics"
playerq "github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis/playerq"
log "github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"github.com/gomodule/redigo/redis"
"github.com/spf13/viper"
"go.opencensus.io/plugin/ocgrpc"
"google.golang.org/grpc"
)
// Logrus structured logging setup
var (
feLogFields = log.Fields{
"app": "openmatch",
"component": "frontend",
"caller": "frontendapi/apisrv/apisrv.go",
}
feLog = log.WithFields(feLogFields)
)
// FrontendAPI implements frontend.ApiServer, the server generated by compiling
// the protobuf, by fulfilling the frontend.APIClient interface.
type FrontendAPI struct {
grpc *grpc.Server
cfg *viper.Viper
pool *redis.Pool
}
type frontendAPI FrontendAPI
// New returns an instantiated service
func New(cfg *viper.Viper, pool *redis.Pool) *FrontendAPI {
s := FrontendAPI{
pool: pool,
grpc: grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})),
cfg: cfg,
}
// Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus
log.AddHook(metrics.NewHook(FeLogLines, KeySeverity))
// Register gRPC server
frontend.RegisterAPIServer(s.grpc, (*frontendAPI)(&s))
feLog.Info("Successfully registered gRPC server")
return &s
}
// Open starts the api grpc service listening on the configured port.
func (s *FrontendAPI) Open() error {
ln, err := net.Listen("tcp", ":"+s.cfg.GetString("api.frontend.port"))
if err != nil {
feLog.WithFields(log.Fields{
"error": err.Error(),
"port": s.cfg.GetInt("api.frontend.port"),
}).Error("net.Listen() error")
return err
}
feLog.WithFields(log.Fields{"port": s.cfg.GetInt("api.frontend.port")}).Info("TCP net listener initialized")
go func() {
err := s.grpc.Serve(ln)
if err != nil {
feLog.WithFields(log.Fields{"error": err.Error()}).Error("gRPC serve() error")
}
feLog.Info("serving gRPC endpoints")
}()
return nil
}
// CreateRequest is this service's implementation of the CreateRequest gRPC method defined in
// ../proto/frontend.proto
func (s *frontendAPI) CreateRequest(c context.Context, g *frontend.Group) (*frontend.Result, error) {
// Get redis connection from pool
redisConn := s.pool.Get()
defer redisConn.Close()
// Create context for tagging OpenCensus metrics.
funcName := "CreateRequest"
fnCtx, _ := tag.New(c, tag.Insert(KeyMethod, funcName))
// Write group
// TODO: Remove playerq module and just use redishelper module once
// indexing has its own implementation
err := playerq.Create(redisConn, g.Id, g.Properties)
if err != nil {
feLog.WithFields(log.Fields{
"error": err.Error(),
"component": "statestorage",
}).Error("State storage error")
stats.Record(fnCtx, FeGrpcErrors.M(1))
return &frontend.Result{Success: false, Error: err.Error()}, err
}
stats.Record(fnCtx, FeGrpcRequests.M(1))
return &frontend.Result{Success: true, Error: ""}, err
}
// DeleteRequest is this service's implementation of the DeleteRequest gRPC method defined in
// frontendapi/proto/frontend.proto
func (s *frontendAPI) DeleteRequest(c context.Context, g *frontend.Group) (*frontend.Result, error) {
// Get redis connection from pool
redisConn := s.pool.Get()
defer redisConn.Close()
// Create context for tagging OpenCensus metrics.
funcName := "DeleteRequest"
fnCtx, _ := tag.New(c, tag.Insert(KeyMethod, funcName))
// Write group
err := playerq.Delete(redisConn, g.Id)
if err != nil {
feLog.WithFields(log.Fields{
"error": err.Error(),
"component": "statestorage",
}).Error("State storage error")
stats.Record(fnCtx, FeGrpcErrors.M(1))
return &frontend.Result{Success: false, Error: err.Error()}, err
}
stats.Record(fnCtx, FeGrpcRequests.M(1))
return &frontend.Result{Success: true, Error: ""}, err
}
// GetAssignment is this service's implementation of the GetAssignment gRPC method defined in
// frontendapi/proto/frontend.proto
func (s *frontendAPI) GetAssignment(c context.Context, p *frontend.PlayerId) (*frontend.ConnectionInfo, error) {
// Get cancellable context
ctx, cancel := context.WithCancel(c)
defer cancel()
// Create context for tagging OpenCensus metrics.
funcName := "GetAssignment"
fnCtx, _ := tag.New(ctx, tag.Insert(KeyMethod, funcName))
// get and return connection string
var connString string
watchChan := s.watcher(ctx, s.pool, p.Id) // watcher() runs the appropriate Redis commands.
select {
case <-time.After(30 * time.Second): // TODO: Make this configurable.
err := errors.New("did not see matchmaking results in redis before timeout")
// TODO: Timeout: deal with the fallout.
// When there is a timeout, we need to send a stop to the watch channel;
// cancelling ctx isn't doing it.
//cancel()
feLog.WithFields(log.Fields{
"error": err.Error(),
"component": "statestorage",
"playerid": p.Id,
}).Error("State storage error")
errTag, _ := tag.NewKey("errtype")
fnCtx, _ := tag.New(ctx, tag.Insert(errTag, "watch_timeout"))
stats.Record(fnCtx, FeGrpcErrors.M(1))
return &frontend.ConnectionInfo{ConnectionString: ""}, err
case connString = <-watchChan:
feLog.Debug(p.Id, "connString:", connString)
}
stats.Record(fnCtx, FeGrpcRequests.M(1))
return &frontend.ConnectionInfo{ConnectionString: connString}, nil
}
// DeleteAssignment is this service's implementation of the DeleteAssignment gRPC method defined in
// frontendapi/proto/frontend.proto
func (s *frontendAPI) DeleteAssignment(c context.Context, p *frontend.PlayerId) (*frontend.Result, error) {
// Get redis connection from pool
redisConn := s.pool.Get()
defer redisConn.Close()
// Create context for tagging OpenCensus metrics.
funcName := "DeleteAssignment"
fnCtx, _ := tag.New(c, tag.Insert(KeyMethod, funcName))
// Write group
err := playerq.Delete(redisConn, p.Id)
if err != nil {
feLog.WithFields(log.Fields{
"error": err.Error(),
"component": "statestorage",
}).Error("State storage error")
stats.Record(fnCtx, FeGrpcErrors.M(1))
return &frontend.Result{Success: false, Error: err.Error()}, err
}
stats.Record(fnCtx, FeGrpcRequests.M(1))
return &frontend.Result{Success: true, Error: ""}, err
}
//TODO: Everything below this line will be moved to the redis statestorage library
// in an upcoming version.
// ================================================
// watcher makes a channel and returns it immediately. It also launches an
// asynchronous goroutine that watches a redis key and returns the value of
// the 'connstring' field of that key once it exists on the channel.
//
// The pattern for this function is from 'Go Concurrency Patterns': a function
// that wraps a closure goroutine and returns a channel.
// reference: https://talks.golang.org/2012/concurrency.slide#25
func (s *frontendAPI) watcher(ctx context.Context, pool *redis.Pool, key string) <-chan string {
// Add the key as a field to all logs for the execution of this function.
feLog = feLog.WithFields(log.Fields{"key": key})
feLog.Debug("Watching key in statestorage for changes")
watchChan := make(chan string)
go func() {
// var declaration
var results string
var err = errors.New("haven't queried Redis yet")
// Loop, querying redis until this key has a value
for err != nil {
select {
case <-ctx.Done():
// Cleanup
close(watchChan)
return
default:
results, err = s.retrieveConnstring(ctx, pool, key, s.cfg.GetString("jsonkeys.connstring"))
if err != nil {
time.Sleep(5 * time.Second) // TODO: exp bo + jitter
}
}
}
// Return the value retrieved from Redis asynchronously and tell the calling function we're done
feLog.Debug("Statestorage watched record update detected")
watchChan <- results
close(watchChan)
}()
return watchChan
}
// retrieveConnstring is a concurrent-safe, context-aware redis HGET of the 'connstring' field in the input key
// TODO: This will be moved to the redis statestorage module.
func (s *frontendAPI) retrieveConnstring(ctx context.Context, pool *redis.Pool, key string, field string) (string, error) {
// Add the key as a field to all logs for the execution of this function.
feLog = feLog.WithFields(log.Fields{"key": key})
cmd := "HGET"
feLog.WithFields(log.Fields{"query": cmd}).Debug("Statestorage operation")
// Get a connection to redis
redisConn, err := pool.GetContext(ctx)
defer redisConn.Close()
// Encountered an issue getting a connection from the pool.
if err != nil {
feLog.WithFields(log.Fields{
"error": err.Error(),
"query": cmd}).Error("Statestorage connection error")
return "", err
}
// Run redis query and return
return redis.String(redisConn.Do("HGET", key, field))
}
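
The watcher above polls on a fixed five-second sleep, and the inline TODO calls for exponential backoff plus jitter. Below is a minimal sketch of such a sleep, assuming a capped, fully jittered delay; backoffSleep is a hypothetical helper, not part of this package.

package main

import (
	"math/rand"
	"time"
)

// backoffSleep sleeps for a random duration in [0, base*2^attempt),
// capped at max ("full jitter" backoff).
func backoffSleep(attempt uint, base, max time.Duration) {
	d := base << attempt // base * 2^attempt
	if d <= 0 || d > max {
		d = max // also guards against shift overflow
	}
	time.Sleep(time.Duration(rand.Int63n(int64(d))))
}

func main() {
	// In the watcher loop, attempt would increment on each failed
	// retrieveConnstring call and reset to zero after a success.
	for attempt := uint(0); attempt < 5; attempt++ {
		backoffSleep(attempt, 100*time.Millisecond, 2*time.Second)
	}
}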


@ -0,0 +1,8 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [
    'build',
    '--tag=gcr.io/$PROJECT_ID/openmatch-frontendapi:0.4',
    '.'
  ]
images: ['gcr.io/$PROJECT_ID/openmatch-frontendapi:0.4']


@ -1,7 +1,7 @@
/*
This application handles all the startup and connection scaffolding for
running a gRPC server serving the APIService as defined in
frontendapi/proto/frontend.pb.go
${OM_ROOT}/internal/pb/frontend.pb.go
All the actual important bits are in the API Server source code: apisrv/apisrv.go
@ -19,87 +19,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"errors"
"os"
"os/signal"
"github.com/GoogleCloudPlatform/open-match/cmd/frontendapi/apisrv"
"github.com/GoogleCloudPlatform/open-match/config"
"github.com/GoogleCloudPlatform/open-match/internal/metrics"
redishelpers "github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"go.opencensus.io/plugin/ocgrpc"
"github.com/GoogleCloudPlatform/open-match/internal/app/frontendapi"
)
var (
// Logrus structured logging setup
feLogFields = log.Fields{
"app": "openmatch",
"component": "frontend",
"caller": "frontendapi/main.go",
}
feLog = log.WithFields(feLogFields)
// Viper config management setup
cfg = viper.New()
err = errors.New("")
)
func init() {
// Logrus structured logging initialization
// Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus
log.AddHook(metrics.NewHook(apisrv.FeLogLines, apisrv.KeySeverity))
// Viper config management initialization
cfg, err = config.Read()
if err != nil {
feLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Unable to load config file")
}
if cfg.GetBool("debug") == true {
log.SetLevel(log.DebugLevel) // debug only, verbose - turn off in production!
feLog.Warn("Debug logging configured. Not recommended for production!")
}
// Configure OpenCensus exporter to Prometheus
// metrics.ConfigureOpenCensusPrometheusExporter expects that every OpenCensus view you
// want to register is in an array, so append any views you want from other
// packages to a single array here.
ocServerViews := apisrv.DefaultFrontendAPIViews // FrontendAPI OpenCensus views.
ocServerViews = append(ocServerViews, ocgrpc.DefaultServerViews...) // gRPC OpenCensus views.
ocServerViews = append(ocServerViews, config.CfgVarCountView) // config loader view.
// Waiting on https://github.com/opencensus-integrations/redigo/pull/1
// ocServerViews = append(ocServerViews, redis.ObservabilityMetricViews...) // redis OpenCensus views.
feLog.WithFields(log.Fields{"viewscount": len(ocServerViews)}).Info("Loaded OpenCensus views")
metrics.ConfigureOpenCensusPrometheusExporter(cfg, ocServerViews)
}
func main() {
// Connect to redis
pool := redishelpers.ConnectionPool(cfg)
defer pool.Close()
// Instantiate the gRPC server with the connections we've made
feLog.WithFields(log.Fields{"testfield": "test"}).Info("Attempting to start gRPC server")
srv := apisrv.New(cfg, pool)
// Run the gRPC server
err := srv.Open()
if err != nil {
feLog.WithFields(log.Fields{"error": err.Error()}).Fatal("Failed to start gRPC server")
}
// Exit when we see a signal
terminate := make(chan os.Signal, 1)
signal.Notify(terminate, os.Interrupt)
<-terminate
feLog.Info("Shutting down gRPC server")
frontendapi.RunApplication()
}


@ -1 +0,0 @@
../../config/matchmaker_config.json


@ -0,0 +1 @@
../../config/matchmaker_config.yaml


@ -1,4 +0,0 @@
/*
frontend is a package compiled from the protocol buffer definition in <REPO_ROOT>/api/protobuf-spec/frontend.proto. It is auto-generated and shouldn't be edited.
*/
package frontend


@ -1,335 +0,0 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: frontend.proto
/*
Package frontend is a generated protocol buffer package.
It is generated from these files:
frontend.proto
It has these top-level messages:
Group
PlayerId
ConnectionInfo
Result
*/
package frontend
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Data structure for a group of players to pass to the matchmaking function.
// Obviously, the group can be a group of one!
type Group struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Properties string `protobuf:"bytes,2,opt,name=properties" json:"properties,omitempty"`
}
func (m *Group) Reset() { *m = Group{} }
func (m *Group) String() string { return proto.CompactTextString(m) }
func (*Group) ProtoMessage() {}
func (*Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Group) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Group) GetProperties() string {
if m != nil {
return m.Properties
}
return ""
}
type PlayerId struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
}
func (m *PlayerId) Reset() { *m = PlayerId{} }
func (m *PlayerId) String() string { return proto.CompactTextString(m) }
func (*PlayerId) ProtoMessage() {}
func (*PlayerId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *PlayerId) GetId() string {
if m != nil {
return m.Id
}
return ""
}
// Simple message used to pass the connection string for the DGS to the player.
type ConnectionInfo struct {
ConnectionString string `protobuf:"bytes,1,opt,name=connection_string,json=connectionString" json:"connection_string,omitempty"`
}
func (m *ConnectionInfo) Reset() { *m = ConnectionInfo{} }
func (m *ConnectionInfo) String() string { return proto.CompactTextString(m) }
func (*ConnectionInfo) ProtoMessage() {}
func (*ConnectionInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *ConnectionInfo) GetConnectionString() string {
if m != nil {
return m.ConnectionString
}
return ""
}
// Simple message to return success/failure and error status.
type Result struct {
Success bool `protobuf:"varint,1,opt,name=success" json:"success,omitempty"`
Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
}
func (m *Result) Reset() { *m = Result{} }
func (m *Result) String() string { return proto.CompactTextString(m) }
func (*Result) ProtoMessage() {}
func (*Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Result) GetSuccess() bool {
if m != nil {
return m.Success
}
return false
}
func (m *Result) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func init() {
proto.RegisterType((*Group)(nil), "Group")
proto.RegisterType((*PlayerId)(nil), "PlayerId")
proto.RegisterType((*ConnectionInfo)(nil), "ConnectionInfo")
proto.RegisterType((*Result)(nil), "Result")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for API service
type APIClient interface {
CreateRequest(ctx context.Context, in *Group, opts ...grpc.CallOption) (*Result, error)
DeleteRequest(ctx context.Context, in *Group, opts ...grpc.CallOption) (*Result, error)
GetAssignment(ctx context.Context, in *PlayerId, opts ...grpc.CallOption) (*ConnectionInfo, error)
DeleteAssignment(ctx context.Context, in *PlayerId, opts ...grpc.CallOption) (*Result, error)
}
type aPIClient struct {
cc *grpc.ClientConn
}
func NewAPIClient(cc *grpc.ClientConn) APIClient {
return &aPIClient{cc}
}
func (c *aPIClient) CreateRequest(ctx context.Context, in *Group, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/API/CreateRequest", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) DeleteRequest(ctx context.Context, in *Group, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/API/DeleteRequest", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) GetAssignment(ctx context.Context, in *PlayerId, opts ...grpc.CallOption) (*ConnectionInfo, error) {
out := new(ConnectionInfo)
err := grpc.Invoke(ctx, "/API/GetAssignment", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *aPIClient) DeleteAssignment(ctx context.Context, in *PlayerId, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/API/DeleteAssignment", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for API service
type APIServer interface {
CreateRequest(context.Context, *Group) (*Result, error)
DeleteRequest(context.Context, *Group) (*Result, error)
GetAssignment(context.Context, *PlayerId) (*ConnectionInfo, error)
DeleteAssignment(context.Context, *PlayerId) (*Result, error)
}
func RegisterAPIServer(s *grpc.Server, srv APIServer) {
s.RegisterService(&_API_serviceDesc, srv)
}
func _API_CreateRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Group)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).CreateRequest(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/CreateRequest",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).CreateRequest(ctx, req.(*Group))
}
return interceptor(ctx, in, info, handler)
}
func _API_DeleteRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Group)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).DeleteRequest(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/DeleteRequest",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).DeleteRequest(ctx, req.(*Group))
}
return interceptor(ctx, in, info, handler)
}
func _API_GetAssignment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PlayerId)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).GetAssignment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/GetAssignment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).GetAssignment(ctx, req.(*PlayerId))
}
return interceptor(ctx, in, info, handler)
}
func _API_DeleteAssignment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PlayerId)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(APIServer).DeleteAssignment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/API/DeleteAssignment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(APIServer).DeleteAssignment(ctx, req.(*PlayerId))
}
return interceptor(ctx, in, info, handler)
}
var _API_serviceDesc = grpc.ServiceDesc{
ServiceName: "API",
HandlerType: (*APIServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateRequest",
Handler: _API_CreateRequest_Handler,
},
{
MethodName: "DeleteRequest",
Handler: _API_DeleteRequest_Handler,
},
{
MethodName: "GetAssignment",
Handler: _API_GetAssignment_Handler,
},
{
MethodName: "DeleteAssignment",
Handler: _API_DeleteAssignment_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "frontend.proto",
}
func init() { proto.RegisterFile("frontend.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 260 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0x41, 0x4b, 0xfb, 0x40,
0x10, 0xc5, 0x9b, 0xfc, 0x69, 0xda, 0x0e, 0x34, 0xff, 0xba, 0x78, 0x08, 0x39, 0x88, 0xec, 0xa9,
0x20, 0xee, 0x41, 0x0f, 0x7a, 0xf1, 0x50, 0x2a, 0x94, 0xdc, 0x4a, 0xfc, 0x00, 0x52, 0x93, 0x69,
0x59, 0x88, 0xbb, 0x71, 0x66, 0x72, 0xf0, 0x0b, 0xf9, 0x39, 0xc5, 0x4d, 0x6b, 0x55, 0xc4, 0xe3,
0xfb, 0xed, 0x7b, 0x8f, 0x7d, 0x03, 0xe9, 0x96, 0xbc, 0x13, 0x74, 0xb5, 0x69, 0xc9, 0x8b, 0xd7,
0x37, 0x30, 0x5c, 0x91, 0xef, 0x5a, 0x95, 0x42, 0x6c, 0xeb, 0x2c, 0x3a, 0x8f, 0xe6, 0x93, 0x32,
0xb6, 0xb5, 0x3a, 0x03, 0x68, 0xc9, 0xb7, 0x48, 0x62, 0x91, 0xb3, 0x38, 0xf0, 0x2f, 0x44, 0xe7,
0x30, 0x5e, 0x37, 0x9b, 0x57, 0xa4, 0xa2, 0xfe, 0x99, 0xd5, 0x77, 0x90, 0x2e, 0xbd, 0x73, 0x58,
0x89, 0xf5, 0xae, 0x70, 0x5b, 0xaf, 0x2e, 0xe0, 0xa4, 0xfa, 0x24, 0x8f, 0x2c, 0x64, 0xdd, 0x6e,
0x1f, 0x98, 0x1d, 0x1f, 0x1e, 0x02, 0xd7, 0xb7, 0x90, 0x94, 0xc8, 0x5d, 0x23, 0x2a, 0x83, 0x11,
0x77, 0x55, 0x85, 0xcc, 0xc1, 0x3c, 0x2e, 0x0f, 0x52, 0x9d, 0xc2, 0x10, 0x89, 0x3c, 0xed, 0x7f,
0xd6, 0x8b, 0xab, 0xb7, 0x08, 0xfe, 0x2d, 0xd6, 0x85, 0xd2, 0x30, 0x5d, 0x12, 0x6e, 0x04, 0x4b,
0x7c, 0xe9, 0x90, 0x45, 0x25, 0x26, 0xac, 0xcc, 0x47, 0xa6, 0x6f, 0xd6, 0x83, 0x0f, 0xcf, 0x3d,
0x36, 0xf8, 0xa7, 0xe7, 0x12, 0xa6, 0x2b, 0x94, 0x05, 0xb3, 0xdd, 0xb9, 0x67, 0x74, 0xa2, 0x26,
0xe6, 0x30, 0x3a, 0xff, 0x6f, 0xbe, 0x6f, 0xd4, 0x03, 0x35, 0x87, 0x59, 0x5f, 0xf9, 0x7b, 0xe2,
0x58, 0xfc, 0x94, 0x84, 0xeb, 0x5f, 0xbf, 0x07, 0x00, 0x00, 0xff, 0xff, 0x2b, 0xde, 0x2c, 0x5b,
0x8f, 0x01, 0x00, 0x00,
}
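
Read together, the frontend client API above implies the request lifecycle: CreateRequest enqueues a group, and GetAssignment blocks until the matchmaker writes a connection string (or the server-side 30-second watch times out). Here is a minimal sketch of that flow; the import path follows the pre-move cmd/ layout, and the dial target and properties blob are assumptions.

package main

import (
	"context"
	"log"

	frontend "github.com/GoogleCloudPlatform/open-match/cmd/frontendapi/proto"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("om-frontendapi:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := frontend.NewAPIClient(conn)
	ctx := context.Background()

	// Enqueue a group of one player for matchmaking.
	group := &frontend.Group{
		Id:         "player-1234",
		Properties: `{"mmr": {"rating": 1500}}`,
	}
	if _, err := client.CreateRequest(ctx, group); err != nil {
		log.Fatal(err)
	}

	// Block until an assignment shows up in state storage.
	info, err := client.GetAssignment(ctx, &frontend.PlayerId{Id: "player-1234"})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("connect to:", info.ConnectionString)
}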

cmd/mmforc/Dockerfile

@ -0,0 +1,9 @@
FROM open-match-base-build as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/mmforc/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
FROM gcr.io/distroless/static
COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/mmforc/mmforc .
ENTRYPOINT ["/mmforc"]


@ -0,0 +1,8 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: [
    'build',
    '--tag=gcr.io/$PROJECT_ID/openmatch-mmforc:0.4',
    '.'
  ]
images: ['gcr.io/$PROJECT_ID/openmatch-mmforc:0.4']


@ -17,392 +17,13 @@ limitations under the License.
// Note: the example only works with the code within the same release/branch.
// This is based on the example from the official k8s golang client repository:
// k8s.io/client-go/examples/create-update-delete-deployment/
package main
import (
"context"
"errors"
"os"
"strconv"
"strings"
"time"
"github.com/GoogleCloudPlatform/open-match/config"
"github.com/GoogleCloudPlatform/open-match/internal/metrics"
redisHelpers "github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis"
"github.com/tidwall/gjson"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"github.com/gomodule/redigo/redis"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
batchv1 "k8s.io/api/batch/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//"k8s.io/kubernetes/pkg/api"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"github.com/GoogleCloudPlatform/open-match/internal/app/mmforc"
)
var (
// Logrus structured logging setup
mmforcLogFields = log.Fields{
"app": "openmatch",
"component": "mmforc",
"caller": "mmforc/main.go",
}
mmforcLog = log.WithFields(mmforcLogFields)
// Viper config management setup
cfg = viper.New()
err = errors.New("")
)
func init() {
// Logrus structured logging initialization
// Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus
log.SetFormatter(&log.JSONFormatter{})
log.AddHook(metrics.NewHook(MmforcLogLines, KeySeverity))
// Viper config management initialization
cfg, err = config.Read()
if err != nil {
mmforcLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Unable to load config file")
}
if cfg.GetBool("debug") == true {
log.SetLevel(log.DebugLevel) // debug only, verbose - turn off in production!
mmforcLog.Warn("Debug logging configured. Not recommended for production!")
}
// Configure OpenCensus exporter to Prometheus
// metrics.ConfigureOpenCensusPrometheusExporter expects that every OpenCensus view you
// want to register is in an array, so append any views you want from other
// packages to a single array here.
ocMmforcViews := DefaultMmforcViews // mmforc OpenCensus views.
// Waiting on https://github.com/opencensus-integrations/redigo/pull/1
// ocMmforcViews = append(ocMmforcViews, redis.ObservabilityMetricViews...) // redis OpenCensus views.
mmforcLog.WithFields(log.Fields{"viewscount": len(ocMmforcViews)}).Info("Loaded OpenCensus views")
metrics.ConfigureOpenCensusPrometheusExporter(cfg, ocMmforcViews)
}
func main() {
pool := redisHelpers.ConnectionPool(cfg)
redisConn := pool.Get()
defer redisConn.Close()
// Get k8s credentials so we can start k8s Jobs
mmforcLog.Info("Attempting to acquire k8s credentials")
config, err := rest.InClusterConfig()
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
mmforcLog.Info("K8s credentials acquired")
start := time.Now()
checkProposals := true
// main loop; kick off matchmaker functions for profiles in the profile
// queue and an evaluator when proposals are in the proposals queue
for {
ctx, cancel := context.WithCancel(context.Background())
_ = cancel
// Get profiles and kick off a job for each
mmforcLog.WithFields(log.Fields{
"profileQueueName": cfg.GetString("queues.profiles.name"),
"pullCount": cfg.GetInt("queues.profiles.pullCount"),
"query": "SPOP",
"component": "statestorage",
}).Debug("Retreiving match profiles")
results, err := redis.Strings(redisConn.Do("SPOP",
cfg.GetString("queues.profiles.name"), cfg.GetInt("queues.profiles.pullCount")))
if err != nil {
panic(err)
}
if len(results) > 0 {
mmforcLog.WithFields(log.Fields{
"numProfiles": len(results),
}).Info("Starting MMF jobs...")
for _, profile := range results {
// Kick off the job asynchronously
go mmfunc(ctx, profile, cfg, clientset, pool)
// Count the number of jobs running
redisHelpers.Increment(context.Background(), pool, "concurrentMMFs")
}
} else {
mmforcLog.WithFields(log.Fields{
"profileQueueName": cfg.GetString("queues.profiles.name"),
}).Info("Unable to retreive match profiles from statestorage - have you entered any?")
}
// Check to see if we should run the evaluator.
// Get number of running MMFs
r, err := redisHelpers.Retrieve(context.Background(), pool, "concurrentMMFs")
if err != nil {
if err.Error() == "redigo: nil returned" {
// No MMFs have run since we last evaluated; reset timer and loop
mmforcLog.Debug("Number of concurrentMMFs is nil")
start = time.Now()
time.Sleep(1000 * time.Millisecond)
}
continue
}
numRunning, err := strconv.Atoi(r)
if err != nil {
mmforcLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Issue retrieving number of currently running MMFs")
}
// We are ready to evaluate either when all MMFs are complete, or the
// timeout is reached.
//
// Tuning how frequently the evaluator runs is a complex topic and
// probably only of interest to users running large-scale production
// workloads with many concurrently running matchmaking functions,
// which have some overlap in the matchmaking player pools. Suffice to
// say that under load, this switch should almost always trigger the
// timeout interval code path. The concurrentMMFs check is a dead man's
// switch: it keeps the loop from waiting out the full interval to run
// the evaluator when all your MMFs have already finished.
switch {
case time.Since(start).Seconds() >= float64(cfg.GetInt("interval.evaluator")):
mmforcLog.WithFields(log.Fields{
"interval": cfg.GetInt("interval.evaluator"),
}).Info("Maximum evaluator interval exceeded")
checkProposals = true
// Opencensus tagging
ctx, _ = tag.New(ctx, tag.Insert(KeyEvalReason, "interval_exceeded"))
case numRunning <= 0:
mmforcLog.Info("All MMFs complete")
checkProposals = true
numRunning = 0
ctx, _ = tag.New(ctx, tag.Insert(KeyEvalReason, "mmfs_completed"))
}
if checkProposals {
// Make sure there are proposals in the queue. No need to run the
// evaluator if there are none.
checkProposals = false
mmforcLog.Info("Checking statestorage for match object proposals")
results, err := redisHelpers.Count(context.Background(), pool, cfg.GetString("queues.proposals.name"))
switch {
case err != nil:
mmforcLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Couldn't retrieve the length of the proposal queue from statestorage!")
case results == 0:
mmforcLog.Warn("No proposals in the queue!")
default:
mmforcLog.WithFields(log.Fields{
"numProposals": results,
}).Info("Proposals available, evaluating!")
go evaluator(ctx, cfg, clientset)
}
_, err = redisHelpers.Delete(context.Background(), pool, "concurrentMMFs")
if err != nil {
mmforcLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Error deleting concurrent MMF counter!")
}
start = time.Now()
}
// TODO: Make this tunable via config.
// A sleep here is not critical but just a useful safety valve in case
// things are broken, to keep the main loop from going all-out and spamming the log.
mainSleep := 1000
mmforcLog.WithFields(log.Fields{
"ms": mainSleep,
}).Info("Sleeping...")
time.Sleep(time.Duration(mainSleep) * time.Millisecond)
} // End main for loop
mmforc.RunApplication()
}
// mmfunc generates a k8s job that runs the specified mmf container image.
// resultsID is the redis key that the Backend API is monitoring for results; we can 'short circuit' and write errors directly to this key if we can't run the MMF for some reason.
func mmfunc(ctx context.Context, resultsID string, cfg *viper.Viper, clientset *kubernetes.Clientset, pool *redis.Pool) {
// Generate the various keys/names, some of which must be populated to the k8s job.
imageName := cfg.GetString("defaultImages.mmf.name") + ":" + cfg.GetString("defaultImages.mmf.tag")
jobType := "mmf"
ids := strings.Split(resultsID, ".") // comes in as dot-concatenated moID and profID.
moID := ids[0]
profID := ids[1]
timestamp := strconv.Itoa(int(time.Now().Unix()))
jobName := timestamp + "." + moID + "." + profID + "." + jobType
propID := "proposal." + timestamp + "." + moID + "." + profID
// Extra fields for structured logging
lf := log.Fields{"jobName": jobName}
if cfg.GetBool("debug") { // Log a lot more info.
lf = log.Fields{
"jobType": jobType,
"backendMatchObject": moID,
"profile": profID,
"jobTimestamp": timestamp,
"containerImage": imageName,
"jobName": jobName,
"profileImageJSONKey": cfg.GetString("jsonkeys.mmfImage"),
}
}
mmfuncLog := mmforcLog.WithFields(lf)
// Read the full profile from redis and access any keys that are important to deciding how MMFs are run.
// TODO: convert this to using redispb and directly access the protobuf message instead of retrieving as a map?
profile, err := redisHelpers.RetrieveAll(ctx, pool, profID)
if err != nil {
// Log failure to read this profile and return - won't run an MMF for an unreadable profile.
mmfuncLog.WithFields(log.Fields{"error": err.Error()}).Error("Failure retrieving profile from statestorage")
return
}
// Got profile from state storage, make sure it is valid
if gjson.Valid(profile["properties"]) {
profileImage := gjson.Get(profile["properties"], cfg.GetString("jsonkeys.mmfImage"))
if profileImage.Exists() {
imageName = profileImage.String()
mmfuncLog = mmfuncLog.WithFields(log.Fields{"containerImage": imageName})
} else {
mmfuncLog.Warn("Failed to read image name from profile at configured json key, using default image instead")
}
}
mmfuncLog.Info("Attempting to create mmf k8s job")
// Kick off k8s job
envvars := []apiv1.EnvVar{
{Name: "MMF_PROFILE_ID", Value: profID},
{Name: "MMF_PROPOSAL_ID", Value: propID},
{Name: "MMF_REQUEST_ID", Value: moID},
{Name: "MMF_ERROR_ID", Value: resultsID},
{Name: "MMF_TIMESTAMP", Value: timestamp},
}
err = submitJob(clientset, jobType, jobName, imageName, envvars)
if err != nil {
// Record failure & log
stats.Record(ctx, mmforcMmfFailures.M(1))
mmfuncLog.WithFields(log.Fields{"error": err.Error()}).Error("MMF job submission failure!")
} else {
// Record Success
stats.Record(ctx, mmforcMmfs.M(1))
}
}
// evaluator generates a k8s job that runs the specified evaluator container image.
func evaluator(ctx context.Context, cfg *viper.Viper, clientset *kubernetes.Clientset) {
imageName := cfg.GetString("defaultImages.evaluator.name") + ":" + cfg.GetString("defaultImages.evaluator.tag")
// Generate the job name
timestamp := strconv.Itoa(int(time.Now().Unix()))
jobType := "evaluator"
jobName := timestamp + "." + jobType
mmforcLog.WithFields(log.Fields{
"jobName": jobName,
"containerImage": imageName,
}).Info("Attempting to create evaluator k8s job")
// Kick off k8s job
envvars := []apiv1.EnvVar{{Name: "MMF_TIMESTAMP", Value: timestamp}}
err = submitJob(clientset, jobType, jobName, imageName, envvars)
if err != nil {
// Record failure & log
stats.Record(ctx, mmforcEvalFailures.M(1))
mmforcLog.WithFields(log.Fields{
"error": err.Error(),
"jobName": jobName,
"containerImage": imageName,
}).Error("Evaluator job submission failure!")
} else {
// Record success
stats.Record(ctx, mmforcEvals.M(1))
}
}
// submitJob submits a job to kubernetes
func submitJob(clientset *kubernetes.Clientset, jobType string, jobName string, imageName string, envvars []apiv1.EnvVar) error {
// DEPRECATED: will be removed in a future version. Please switch to using the 'MMF_*' environment variables.
v := strings.Split(jobName, ".")
envvars = append(envvars, apiv1.EnvVar{Name: "PROFILE", Value: strings.Join(v[:len(v)-1], ".")})
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
},
Spec: batchv1.JobSpec{
Completions: int32Ptr(1),
Template: apiv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": jobType,
},
Annotations: map[string]string{
// Unused; here as an example.
// Later we can put things more complicated than
// env vars here and read them using k8s downward API
// volumes
"profile": jobName,
},
},
Spec: apiv1.PodSpec{
RestartPolicy: "Never",
Containers: []apiv1.Container{
{
Name: jobType,
Image: imageName,
ImagePullPolicy: "Always",
Env: envvars,
},
},
},
},
},
}
// Run the job in the current pod's namespace if available; otherwise fall back to the default namespace.
namespace := os.Getenv("METADATA_NAMESPACE")
if len(namespace) == 0 {
namespace = apiv1.NamespaceDefault
}
// Submit kubernetes job
jobsClient := clientset.BatchV1().Jobs(namespace)
result, err := jobsClient.Create(job)
if err != nil {
// TODO: replace queued profiles if things go south
mmforcLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Couldn't create k8s job!")
// Return early; result may be nil when the create call fails.
return err
}
mmforcLog.WithFields(log.Fields{
"jobName": result.GetObjectMeta().GetName(),
}).Info("Created job.")
return nil
}
// readability functions used by submitJob
func int32Ptr(i int32) *int32 { return &i }
func strPtr(i string) *string { return &i }

View File

@ -1 +0,0 @@
../../config/matchmaker_config.json

View File

@ -0,0 +1 @@
../../config/matchmaker_config.yaml

View File

@ -0,0 +1,9 @@
FROM open-match-base-build as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/cmd/mmlogicapi/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
FROM gcr.io/distroless/static
COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/cmd/mmlogicapi/mmlogicapi .
ENTRYPOINT ["/mmlogicapi"]

View File

@ -0,0 +1,8 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'--tag=gcr.io/$PROJECT_ID/openmatch-mmlogicapi:0.4',
'.'
]
images: ['gcr.io/$PROJECT_ID/openmatch-mmlogicapi:0.4']

View File

@ -1,7 +1,7 @@
/*
This application handles all the startup and connection scaffolding for
running a gRPC server serving the APIService as defined in
mmlogic/proto/mmlogic.pb.go
${OM_ROOT}/internal/pb/mmlogic.pb.go
All the actual important bits are in the API Server source code: apisrv/apisrv.go
@ -22,84 +22,9 @@ limitations under the License.
package main
import (
"errors"
"os"
"os/signal"
"github.com/GoogleCloudPlatform/open-match/cmd/mmlogicapi/apisrv"
"github.com/GoogleCloudPlatform/open-match/config"
"github.com/GoogleCloudPlatform/open-match/internal/metrics"
redisHelpers "github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"go.opencensus.io/plugin/ocgrpc"
"github.com/GoogleCloudPlatform/open-match/internal/app/mmlogicapi"
)
var (
// Logrus structured logging setup
mlLogFields = log.Fields{
"app": "openmatch",
"component": "mmlogic",
"caller": "mmlogicapi/main.go",
}
mlLog = log.WithFields(mlLogFields)
// Viper config management setup
cfg = viper.New()
err = errors.New("")
)
func init() {
// Logrus structured logging initialization
// Add a hook to the logger to auto-count log lines for metrics output through OpenCensus
log.AddHook(metrics.NewHook(apisrv.MlLogLines, apisrv.KeySeverity))
// Viper config management initialization
cfg, err = config.Read()
if err != nil {
mlLog.WithFields(log.Fields{
"error": err.Error(),
}).Error("Unable to load config file")
}
if cfg.GetBool("debug") {
log.SetLevel(log.DebugLevel) // debug only, verbose - turn off in production!
mlLog.Warn("Debug logging configured. Not recommended for production!")
}
// Configure OpenCensus exporter to Prometheus
// metrics.ConfigureOpenCensusPrometheusExporter expects that every OpenCensus view you
// want to register is in an array, so append any views you want from other
// packages to a single array here.
ocServerViews := apisrv.DefaultMmlogicAPIViews // Matchmaking logic API OpenCensus views.
ocServerViews = append(ocServerViews, ocgrpc.DefaultServerViews...) // gRPC OpenCensus views.
ocServerViews = append(ocServerViews, config.CfgVarCountView) // config loader view.
// Waiting on https://github.com/opencensus-integrations/redigo/pull/1
// ocServerViews = append(ocServerViews, redis.ObservabilityMetricViews...) // redis OpenCensus views.
mlLog.WithFields(log.Fields{"viewscount": len(ocServerViews)}).Info("Loaded OpenCensus views")
metrics.ConfigureOpenCensusPrometheusExporter(cfg, ocServerViews)
}
func main() {
// Connect to redis
pool := redisHelpers.ConnectionPool(cfg)
defer pool.Close()
// Instantiate the gRPC server with the connections we've made
mlLog.WithFields(log.Fields{"testfield": "test"}).Info("Attempting to start gRPC server")
srv := apisrv.New(cfg, pool)
// Run the gRPC server
err := srv.Open()
if err != nil {
mlLog.WithFields(log.Fields{"error": err.Error()}).Fatal("Failed to start gRPC server")
}
// Exit when we see a signal
terminate := make(chan os.Signal, 1)
signal.Notify(terminate, os.Interrupt)
<-terminate
mlLog.Info("Shutting down gRPC server")
mmlogicapi.RunApplication()
}

View File

@ -1 +0,0 @@
../../config/matchmaker_config.json

View File

@ -0,0 +1 @@
../../config/matchmaker_config.yaml

View File

@ -29,7 +29,6 @@ var (
logFields = log.Fields{
"app": "openmatch",
"component": "config",
"caller": "config/main.go",
}
cfgLog = log.WithFields(logFields)
@ -42,13 +41,20 @@ var (
// REDIS_SENTINEL_PORT_6379_TCP_PORT=6379
// REDIS_SENTINEL_PORT_6379_TCP_PROTO=tcp
// REDIS_SENTINEL_SERVICE_HOST=10.55.253.195
//
// MMFs are expected to get their configuration from env vars instead
// of reading the config file. So, config parameters that are required
// by MMFs should be populated to env vars.
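// For example (illustrative only), an MMF written in Go would read the
// mapped values with something like:
//   redisHost := os.Getenv("REDIS_SERVICE_HOST")
//   redisPort := os.Getenv("REDIS_SERVICE_PORT")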
envMappings = map[string]string{
"redis.hostname": "REDIS_SENTINEL_SERVICE_HOST",
"redis.port": "REDIS_SENTINEL_SERVICE_PORT",
"redis.user": "REDIS_USER",
"redis.password": "REDIS_PASSWORD",
"redis.hostname": "REDIS_SERVICE_HOST",
"redis.port": "REDIS_SERVICE_PORT",
"redis.pool.maxIdle": "REDIS_POOL_MAXIDLE",
"redis.pool.maxActive": "REDIS_POOL_MAXACTIVE",
"redis.pool.idleTimeout": "REDIS_POOL_IDLETIMEOUT",
"debug": "DEBUG",
"api.mmlogic.hostname": "OM_MMLOGICAPI_SERVICE_HOST",
"api.mmlogic.port": "OM_MMLOGICAPI_SERVICE_PORT",
}
// Viper config management setup
@ -70,9 +76,13 @@ var (
func Read() (*viper.Viper, error) {
// Viper config management initialization
// Support either json or yaml file types (json for backwards compatibility
// with previous versions)
cfg.SetConfigType("json")
cfg.SetConfigType("yaml")
cfg.SetConfigName("matchmaker_config")
cfg.AddConfigPath(".")
cfg.AddConfigPath("config")
// Read in config file using Viper
err := cfg.ReadInConfig()
@ -109,5 +119,11 @@ func Read() (*viper.Viper, error) {
}
// Look for updates to the config; in Kubernetes, this is implemented using
// a ConfigMap that is written to the matchmaker_config.yaml file, which is
// what the Open Match components using Viper monitor for changes.
// More details about Open Match's use of Kubernetes ConfigMaps at:
// https://github.com/GoogleCloudPlatform/open-match/issues/42
cfg.WatchConfig() // Watch and re-read config file.
return cfg, err
}

View File

@ -1,101 +0,0 @@
{
"debug": true,
"api": {
"backend": {
"hostname": "om-backendapi",
"port": 50505
},
"frontend": {
"hostname": "om-frontendapi",
"port": 50504
},
"mmlogic": {
"hostname": "om-mmlogicapi",
"port": 50503
}
},
"metrics": {
"port": 9555,
"endpoint": "/metrics",
"reportingPeriod": 5
},
"queues": {
"profiles": {
"name": "profileq",
"pullCount": 100
},
"proposals": {
"name": "proposalq"
}
},
"ignoreLists": {
"proposed": {
"name": "proposed",
"offset": 0,
"duration": 800
},
"deindexed": {
"name": "deindexed",
"offset": 0,
"duration": 800
},
"expired": {
"name": "timestamp",
"offset": 800,
"duration": 0
}
},
"defaultImages": {
"evaluator": {
"name": "gcr.io/matchmaker-dev-201405/openmatch-evaluator",
"tag": "dev"
},
"mmf": {
"name": "gcr.io/matchmaker-dev-201405/openmatch-mmf",
"tag": "py3"
}
},
"redis": {
"user": "",
"password": "",
"pool" : {
"maxIdle" : 3,
"maxActive" : 0,
"idleTimeout" : 60
},
"queryArgs":{
"count": 10000
},
"results": {
"pageSize": 10000
}
},
"jsonkeys": {
"mmfImage": "imagename",
"rosters": "properties.rosters",
"connstring": "connstring",
"pools": "properties.pools"
},
"interval": {
"evaluator": 10,
"resultsTimeout": 30
},
"playerIndices": [
"char.cleric",
"char.knight",
"char.paladin",
"map.aleroth",
"map.oasis",
"mmr.rating",
"mode.battleroyale",
"mode.ctf",
"region.europe-east1",
"region.europe-west1",
"region.europe-west2",
"region.europe-west3",
"region.europe-west4",
"role.dps",
"role.support",
"role.tank"
]
}

View File

@ -0,0 +1,97 @@
# kubectl create configmap om-configmap --from-file=config/matchmaker_config.yaml
debug: true
logging:
level: debug
format: text
source: true
api:
backend:
hostname: om-backendapi
port: 50505
backoff: "[2 32] *2 ~0.33 <30"
frontend:
hostname: om-frontendapi
port: 50504
backoff: "[2 32] *2 ~0.33 <300"
mmlogic:
hostname: om-mmlogicapi
port: 50503
functions:
port: 50502
evaluator:
interval: 10
metrics:
port: 9555
endpoint: /metrics
reportingPeriod: 5
queues:
profiles:
name: profileq
pullCount: 100
proposals:
name: proposalq
ignoreLists:
proposed:
name: proposed
offset: 0
duration: 800
deindexed:
name: deindexed
offset: 0
duration: 800
expired:
name: OM_METADATA.accessed
offset: 800
duration: 0
defaultImages:
evaluator:
name: gcr.io/matchmaker-dev-201405/openmatch-evaluator
tag: dev
mmf:
name: gcr.io/matchmaker-dev-201405/openmatch-mmf-py3-mmlogic-simple
tag: dev
redis:
pool:
maxIdle: 3
maxActive: 0
idleTimeout: 60
queryArgs:
count: 10000
results:
pageSize: 10000
expirations:
player: 43200
matchobject: 43200
jsonkeys:
mmfImage: imagename
mmfService: hostname
rosters: properties.rosters
pools: properties.pools
playerIndices:
- char.cleric
- char.knight
- char.paladin
- map.aleroth
- map.oasis
- mmr.rating
- mode.battleroyale
- mode.ctf
- mode.demo
- region.europe-east1
- region.europe-west1
- region.europe-west2
- region.europe-west3
- region.europe-west4
- role.dps
- role.support
- role.tank

View File

@ -1,53 +0,0 @@
{
"apiVersion":"extensions/v1beta1",
"kind":"Deployment",
"metadata":{
"name":"om-backendapi",
"labels":{
"app":"openmatch",
"component": "backend"
}
},
"spec":{
"replicas":1,
"selector":{
"matchLabels":{
"app":"openmatch",
"component": "backend"
}
},
"template":{
"metadata":{
"labels":{
"app":"openmatch",
"component": "backend"
}
},
"spec":{
"containers":[
{
"name":"om-backend",
"image":"gcr.io/matchmaker-dev-201405/openmatch-backendapi:dev",
"imagePullPolicy":"Always",
"ports": [
{
"name": "grpc",
"containerPort": 50505
},
{
"name": "metrics",
"containerPort": 9555
}
],
"resources":{
"requests":{
"memory":"100Mi",
"cpu":"100m"
}
}
}
]
}
}
}
}

View File

@ -0,0 +1,32 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: om-backendapi
labels:
app: openmatch
component: backend
spec:
replicas: 1
selector:
matchLabels:
app: openmatch
component: backend
template:
metadata:
labels:
app: openmatch
component: backend
spec:
containers:
- name: om-backend
image: gcr.io/open-match-public-images/openmatch-backendapi:dev
imagePullPolicy: Always
ports:
- name: grpc
containerPort: 50505
- name: metrics
containerPort: 9555
resources:
requests:
memory: 100Mi
cpu: 100m

View File

@ -1,20 +0,0 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "om-backendapi"
},
"spec": {
"selector": {
"app": "openmatch",
"component": "backend"
},
"ports": [
{
"protocol": "TCP",
"port": 50505,
"targetPort": "grpc"
}
]
}
}

View File

@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: om-backendapi
spec:
selector:
app: openmatch
component: backend
ports:
- protocol: TCP
port: 50505
targetPort: grpc

View File

@ -1,53 +0,0 @@
{
"apiVersion":"extensions/v1beta1",
"kind":"Deployment",
"metadata":{
"name":"om-frontendapi",
"labels":{
"app":"openmatch",
"component": "frontend"
}
},
"spec":{
"replicas":1,
"selector":{
"matchLabels":{
"app":"openmatch",
"component": "frontend"
}
},
"template":{
"metadata":{
"labels":{
"app":"openmatch",
"component": "frontend"
}
},
"spec":{
"containers":[
{
"name":"om-frontendapi",
"image":"gcr.io/matchmaker-dev-201405/openmatch-frontendapi:dev",
"imagePullPolicy":"Always",
"ports": [
{
"name": "grpc",
"containerPort": 50504
},
{
"name": "metrics",
"containerPort": 9555
}
],
"resources":{
"requests":{
"memory":"100Mi",
"cpu":"100m"
}
}
}
]
}
}
}
}

View File

@ -0,0 +1,32 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: om-frontendapi
labels:
app: openmatch
component: frontend
spec:
replicas: 1
selector:
matchLabels:
app: openmatch
component: frontend
template:
metadata:
labels:
app: openmatch
component: frontend
spec:
containers:
- name: om-frontendapi
image: gcr.io/open-match-public-images/openmatch-frontendapi:dev
imagePullPolicy: Always
ports:
- name: grpc
containerPort: 50504
- name: metrics
containerPort: 9555
resources:
requests:
memory: 100Mi
cpu: 100m

View File

@ -1,20 +0,0 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "om-frontendapi"
},
"spec": {
"selector": {
"app": "openmatch",
"component": "frontend"
},
"ports": [
{
"protocol": "TCP",
"port": 50504,
"targetPort": "grpc"
}
]
}
}

View File

@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: om-frontendapi
spec:
selector:
app: openmatch
component: frontend
ports:
- protocol: TCP
port: 50504
targetPort: grpc

View File

@ -1,27 +0,0 @@
{
"apiVersion": "monitoring.coreos.com/v1",
"kind": "ServiceMonitor",
"metadata": {
"name": "openmatch-metrics",
"labels": {
"app": "openmatch",
"agent": "opencensus",
"destination": "prometheus"
}
},
"spec": {
"selector": {
"matchLabels": {
"app": "openmatch",
"agent": "opencensus",
"destination": "prometheus"
}
},
"endpoints": [
{
"port": "metrics",
"interval": "10s"
}
]
}
}

View File

@ -0,0 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: openmatch-metrics
labels:
app: openmatch
agent: opencensus
destination: prometheus
spec:
selector:
matchLabels:
app: openmatch
agent: opencensus
destination: prometheus
endpoints:
- port: metrics
interval: 10s

View File

@ -1,78 +0,0 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "om-frontend-metrics",
"labels": {
"app": "openmatch",
"component": "frontend",
"agent": "opencensus",
"destination": "prometheus"
}
},
"spec": {
"selector": {
"app": "openmatch",
"component": "frontend"
},
"ports": [
{
"name": "metrics",
"targetPort": 9555,
"port": 19555
}
]
}
}
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "om-backend-metrics",
"labels": {
"app": "openmatch",
"component": "backend",
"agent": "opencensus",
"destination": "prometheus"
}
},
"spec": {
"selector": {
"app": "openmatch",
"component": "backend"
},
"ports": [
{
"name": "metrics",
"targetPort": 9555,
"port": 29555
}
]
}
}
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "om-mmforc-metrics",
"labels": {
"app": "openmatch",
"component": "mmforc",
"agent": "opencensus",
"destination": "prometheus"
}
},
"spec": {
"selector": {
"app": "openmatch",
"component": "mmforc"
},
"ports": [
{
"name": "metrics",
"targetPort": 9555,
"port": 39555
}
]
}
}

View File

@ -0,0 +1,54 @@
---
kind: Service
apiVersion: v1
metadata:
name: om-frontend-metrics
labels:
app: openmatch
component: frontend
agent: opencensus
destination: prometheus
spec:
selector:
app: openmatch
component: frontend
ports:
- name: metrics
targetPort: 9555
port: 19555
---
kind: Service
apiVersion: v1
metadata:
name: om-backend-metrics
labels:
app: openmatch
component: backend
agent: opencensus
destination: prometheus
spec:
selector:
app: openmatch
component: backend
ports:
- name: metrics
targetPort: 9555
port: 29555
---
kind: Service
apiVersion: v1
metadata:
name: om-mmforc-metrics
labels:
app: openmatch
component: mmforc
agent: opencensus
destination: prometheus
spec:
selector:
app: openmatch
component: mmforc
ports:
- name: metrics
targetPort: 9555
port: 39555

View File

@ -1,59 +0,0 @@
{
"apiVersion":"extensions/v1beta1",
"kind":"Deployment",
"metadata":{
"name":"om-mmforc",
"labels":{
"app":"openmatch",
"component": "mmforc"
}
},
"spec":{
"replicas":1,
"selector":{
"matchLabels":{
"app":"openmatch",
"component": "mmforc"
}
},
"template":{
"metadata":{
"labels":{
"app":"openmatch",
"component": "mmforc"
}
},
"spec":{
"containers":[
{
"name":"om-mmforc",
"image":"gcr.io/matchmaker-dev-201405/openmatch-mmforc:dev",
"imagePullPolicy":"Always",
"ports": [
{
"name": "metrics",
"containerPort":9555
}
],
"resources":{
"requests":{
"memory":"100Mi",
"cpu":"100m"
}
},
"env":[
{
"name":"METADATA_NAMESPACE",
"valueFrom": {
"fieldRef": {
"fieldPath": "metadata.namespace"
}
}
}
]
}
]
}
}
}
}

View File

@ -0,0 +1,35 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: om-mmforc
labels:
app: openmatch
component: mmforc
spec:
replicas: 1
selector:
matchLabels:
app: openmatch
component: mmforc
template:
metadata:
labels:
app: openmatch
component: mmforc
spec:
containers:
- name: om-mmforc
image: gcr.io/open-match-public-images/openmatch-mmforc:dev
imagePullPolicy: Always
ports:
- name: metrics
containerPort: 9555
resources:
requests:
memory: 100Mi
cpu: 100m
env:
- name: METADATA_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace

View File

@ -1,19 +0,0 @@
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "ClusterRoleBinding",
"metadata": {
"name": "mmf-sa"
},
"subjects": [
{
"kind": "ServiceAccount",
"name": "default",
"namespace": "default"
}
],
"roleRef": {
"kind": "ClusterRole",
"name": "cluster-admin",
"apiGroup": "rbac.authorization.k8s.io"
}
}

View File

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: mmf-sa
subjects:
- kind: ServiceAccount
name: default
namespace: default
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io

View File

@ -1,53 +0,0 @@
{
"apiVersion":"extensions/v1beta1",
"kind":"Deployment",
"metadata":{
"name":"om-mmlogicapi",
"labels":{
"app":"openmatch",
"component": "mmlogic"
}
},
"spec":{
"replicas":1,
"selector":{
"matchLabels":{
"app":"openmatch",
"component": "mmlogic"
}
},
"template":{
"metadata":{
"labels":{
"app":"openmatch",
"component": "mmlogic"
}
},
"spec":{
"containers":[
{
"name":"om-mmlogic",
"image":"gcr.io/matchmaker-dev-201405/openmatch-mmlogicapi:dev",
"imagePullPolicy":"Always",
"ports": [
{
"name": "grpc",
"containerPort": 50503
},
{
"name": "metrics",
"containerPort": 9555
}
],
"resources":{
"requests":{
"memory":"100Mi",
"cpu":"100m"
}
}
}
]
}
}
}
}

View File

@ -0,0 +1,32 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: om-mmlogicapi
labels:
app: openmatch
component: mmlogic
spec:
replicas: 1
selector:
matchLabels:
app: openmatch
component: mmlogic
template:
metadata:
labels:
app: openmatch
component: mmlogic
spec:
containers:
- name: om-mmlogic
image: gcr.io/open-match-public-images/openmatch-mmlogicapi:dev
imagePullPolicy: Always
ports:
- name: grpc
containerPort: 50503
- name: metrics
containerPort: 9555
resources:
requests:
memory: 100Mi
cpu: 100m

View File

@ -1,20 +0,0 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "om-mmlogicapi"
},
"spec": {
"selector": {
"app": "openmatch",
"component": "mmlogic"
},
"ports": [
{
"protocol": "TCP",
"port": 50503,
"targetPort": "grpc"
}
]
}
}

View File

@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: om-mmlogicapi
spec:
selector:
app: openmatch
component: mmlogic
ports:
- protocol: TCP
port: 50503
targetPort: grpc

View File

@ -1,20 +0,0 @@
{
"apiVersion": "monitoring.coreos.com/v1",
"kind": "Prometheus",
"metadata": {
"name": "prometheus"
},
"spec": {
"serviceMonitorSelector": {
"matchLabels": {
"app": "openmatch"
}
},
"serviceAccountName": "prometheus",
"resources": {
"requests": {
"memory": "400Mi"
}
}
}
}

View File

@ -0,0 +1,12 @@
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
spec:
serviceMonitorSelector:
matchLabels:
app: openmatch
serviceAccountName: prometheus
resources:
requests:
memory: 400Mi

View File

@ -1,266 +0,0 @@
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "ClusterRoleBinding",
"metadata": {
"name": "prometheus-operator"
},
"roleRef": {
"apiGroup": "rbac.authorization.k8s.io",
"kind": "ClusterRole",
"name": "prometheus-operator"
},
"subjects": [
{
"kind": "ServiceAccount",
"name": "prometheus-operator",
"namespace": "default"
}
]
}
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"name": "prometheus"
}
}
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "ClusterRole",
"metadata": {
"name": "prometheus"
},
"rules": [
{
"apiGroups": [
""
],
"resources": [
"nodes",
"services",
"endpoints",
"pods"
],
"verbs": [
"get",
"list",
"watch"
]
},
{
"apiGroups": [
""
],
"resources": [
"configmaps"
],
"verbs": [
"get"
]
},
{
"nonResourceURLs": [
"/metrics"
],
"verbs": [
"get"
]
}
]
}
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "ClusterRoleBinding",
"metadata": {
"name": "prometheus"
},
"roleRef": {
"apiGroup": "rbac.authorization.k8s.io",
"kind": "ClusterRole",
"name": "prometheus"
},
"subjects": [
{
"kind": "ServiceAccount",
"name": "prometheus",
"namespace": "default"
}
]
}
{
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
"kind": "ClusterRole",
"metadata": {
"name": "prometheus-operator"
},
"rules": [
{
"apiGroups": [
"extensions"
],
"resources": [
"thirdpartyresources"
],
"verbs": [
"*"
]
},
{
"apiGroups": [
"apiextensions.k8s.io"
],
"resources": [
"customresourcedefinitions"
],
"verbs": [
"*"
]
},
{
"apiGroups": [
"monitoring.coreos.com"
],
"resources": [
"alertmanagers",
"prometheuses",
"prometheuses/finalizers",
"servicemonitors"
],
"verbs": [
"*"
]
},
{
"apiGroups": [
"apps"
],
"resources": [
"statefulsets"
],
"verbs": [
"*"
]
},
{
"apiGroups": [
""
],
"resources": [
"configmaps",
"secrets"
],
"verbs": [
"*"
]
},
{
"apiGroups": [
""
],
"resources": [
"pods"
],
"verbs": [
"list",
"delete"
]
},
{
"apiGroups": [
""
],
"resources": [
"services",
"endpoints"
],
"verbs": [
"get",
"create",
"update"
]
},
{
"apiGroups": [
""
],
"resources": [
"nodes"
],
"verbs": [
"list",
"watch"
]
},
{
"apiGroups": [
""
],
"resources": [
"namespaces"
],
"verbs": [
"list"
]
}
]
}
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"name": "prometheus-operator"
}
}
{
"apiVersion": "extensions/v1beta1",
"kind": "Deployment",
"metadata": {
"labels": {
"k8s-app": "prometheus-operator"
},
"name": "prometheus-operator"
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"labels": {
"k8s-app": "prometheus-operator"
}
},
"spec": {
"containers": [
{
"args": [
"--kubelet-service=kube-system/kubelet",
"--config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1"
],
"image": "quay.io/coreos/prometheus-operator:v0.17.0",
"name": "prometheus-operator",
"ports": [
{
"containerPort": 8080,
"name": "http"
}
],
"resources": {
"limits": {
"cpu": "200m",
"memory": "100Mi"
},
"requests": {
"cpu": "100m",
"memory": "50Mi"
}
}
}
],
"securityContext": {
"runAsNonRoot": true,
"runAsUser": 65534
},
"serviceAccountName": "prometheus-operator"
}
}
}
}

View File

@ -0,0 +1,166 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: prometheus-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-operator
subjects:
- kind: ServiceAccount
name: prometheus-operator
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups:
- ''
resources:
- nodes
- services
- endpoints
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
verbs:
- get
- nonResourceURLs:
- "/metrics"
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prometheus-operator
rules:
- apiGroups:
- extensions
resources:
- thirdpartyresources
verbs:
- "*"
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- "*"
- apiGroups:
- monitoring.coreos.com
resources:
- alertmanagers
- prometheuses
- prometheuses/finalizers
- servicemonitors
verbs:
- "*"
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- "*"
- apiGroups:
- ''
resources:
- configmaps
- secrets
verbs:
- "*"
- apiGroups:
- ''
resources:
- pods
verbs:
- list
- delete
- apiGroups:
- ''
resources:
- services
- endpoints
verbs:
- get
- create
- update
- apiGroups:
- ''
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- namespaces
verbs:
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-operator
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
k8s-app: prometheus-operator
name: prometheus-operator
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: prometheus-operator
spec:
containers:
- args:
- "--kubelet-service=kube-system/kubelet"
- "--config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1"
image: quay.io/coreos/prometheus-operator:v0.17.0
name: prometheus-operator
ports:
- containerPort: 8080
name: http
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 50Mi
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: prometheus-operator

View File

@ -1,22 +0,0 @@
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "prometheus"
},
"spec": {
"type": "NodePort",
"ports": [
{
"name": "web",
"nodePort": 30900,
"port": 9090,
"protocol": "TCP",
"targetPort": "web"
}
],
"selector": {
"prometheus": "prometheus"
}
}
}

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: prometheus
spec:
type: NodePort
ports:
- name: web
nodePort: 30900
port: 9090
protocol: TCP
targetPort: web
selector:
prometheus: prometheus

View File

@ -1,38 +0,0 @@
{
"apiVersion": "extensions/v1beta1",
"kind": "Deployment",
"metadata": {
"name": "redis-master"
},
"spec": {
"selector": {
"matchLabels": {
"app": "mm",
"tier": "storage"
}
},
"replicas": 1,
"template": {
"metadata": {
"labels": {
"app": "mm",
"tier": "storage"
}
},
"spec": {
"containers": [
{
"name": "redis-master",
"image": "redis:4.0.11",
"ports": [
{
"name": "redis",
"containerPort": 6379
}
]
}
]
}
}
}
}

View File

@ -0,0 +1,22 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-master
spec:
selector:
matchLabels:
app: mm
tier: storage
replicas: 1
template:
metadata:
labels:
app: mm
tier: storage
spec:
containers:
- name: redis-master
image: redis:4.0.11
ports:
- name: redis
containerPort: 6379

View File

@ -1,20 +0,0 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "redis-sentinel"
},
"spec": {
"selector": {
"app": "mm",
"tier": "storage"
},
"ports": [
{
"protocol": "TCP",
"port": 6379,
"targetPort": "redis"
}
]
}
}

View File

@ -0,0 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: redis
spec:
selector:
app: mm
tier: storage
ports:
- protocol: TCP
port: 6379
targetPort: redis

View File

@ -1,6 +1,10 @@
# Development Guide
This doc explains how to set up a development environment so you can get started contributing to Open Match. If you instead want to write a matchmaker that _uses_ Open Match, you probably want to read the [User Guide](user_guide.md).
# Compiling from source
All components of Open Match produce (Linux) Docker container images as artifacts, and there are included `Dockerfile`s for each. [Google Cloud Platform Cloud Build](https://cloud.google.com/cloud-build/docs/) users will also find `cloudbuild_<name>.yaml` files for each component in the repository root.
All components of Open Match produce (Linux) Docker container images as artifacts, and there are included `Dockerfile`s for each. [Google Cloud Platform Cloud Build](https://cloud.google.com/cloud-build/docs/) users will also find `cloudbuild.yaml` files for each component in their respective directories. Note that most of them build from a 'base' image called `openmatch-devbase`. You can find a `Dockerfile` and `cloudbuild_base.yaml` file for this in the repository root. Build it first!
Note: Although Google Cloud Platform includes some free usage, you may incur charges following this guide if you use GCP products.
@ -11,11 +15,11 @@ Note: Although Google Cloud Platform includes some free usage, you may incur cha
**NOTE**: Before starting with this guide, you'll need to update all the URIs from the tutorial's gcr.io container image registry with the URI for your own image registry. If you are using the gcr.io registry on GCP, the default URI is `gcr.io/<PROJECT_NAME>`. Here's an example command in Linux to do this replacement for you (replace YOUR_REGISTRY_URI with your URI; this should be run from the repository root directory):
```
# Linux
egrep -lR 'gcr.io/matchmaker-dev-201405' . | xargs sed -i -e 's|gcr.io/matchmaker-dev-201405|YOUR_REGISTRY_URI|g'
egrep -lR 'matchmaker-dev-201405' . | xargs sed -i -e 's|matchmaker-dev-201405|<PROJECT_NAME>|g'
```
```
# Mac OS, you can delete the .backup files after if all looks good
egrep -lR 'gcr.io/matchmaker-dev-201405' . | xargs sed -i'.backup' -e 's|gcr.io/matchmaker-dev-201405|YOUR_REGISTRY_URI|g'
egrep -lR 'matchmaker-dev-201405' . | xargs sed -i'.backup' -e 's|matchmaker-dev-201405|<PROJECT_NAME>|g'
```
## Example of building using Google Cloud Builder
@ -26,9 +30,14 @@ The [Quickstart for Docker](https://cloud.google.com/cloud-build/docs/quickstart
* In Linux, you can run the following one-line bash script to compile all the images for the first time, and push them to your gcr.io registry. You must enable the [Container Registry API](https://console.cloud.google.com/flows/enableapi?apiid=containerregistry.googleapis.com) first.
```
# First, build the 'base' image. Some other images depend on this so it must complete first.
gcloud build submit --config cloudbuild_base.yaml
gcloud builds submit --config cloudbuild_base.yaml
# Build all other images.
for dfile in $(ls Dockerfile.* | grep -v base); do gcloud builds submit --config cloudbuild_${dfile##*.}.yaml & done
for dfile in $(find . -name "Dockerfile" -iregex "./\(cmd\|test\|examples\)/.*"); do cd $(dirname ${dfile}); gcloud builds submit --config cloudbuild.yaml & cd -; done
```
Note: as of v0.3.0 alpha, the Python and PHP MMF examples still depend on the previous way of building until [issue #42, introducing new config management](https://github.com/GoogleCloudPlatform/open-match/issues/42) is resolved (apologies for the inconvenience):
```
gcloud builds submit --config cloudbuild_mmf_py3.yaml
gcloud builds submit --config cloudbuild_mmf_php.yaml
```
* Once the cloud builds have completed, you can verify that all the builds succeeded in the cloud console or by checking the list of images in your **gcr.io** registry:
```
@ -41,22 +50,12 @@ The [Quickstart for Docker](https://cloud.google.com/cloud-build/docs/quickstart
gcr.io/matchmaker-dev-201405/openmatch-devbase
gcr.io/matchmaker-dev-201405/openmatch-evaluator
gcr.io/matchmaker-dev-201405/openmatch-frontendapi
gcr.io/matchmaker-dev-201405/openmatch-mmf
gcr.io/matchmaker-dev-201405/openmatch-mmf-golang-manual-simple
gcr.io/matchmaker-dev-201405/openmatch-mmf-php-mmlogic-simple
gcr.io/matchmaker-dev-201405/openmatch-mmf-py3-mmlogic-simple
gcr.io/matchmaker-dev-201405/openmatch-mmforc
gcr.io/matchmaker-dev-201405/openmatch-mmlogicapi
```
* The default example MMF images all use the same name (`openmatch-mmf`), with different image tags designating the different examples. You can check that these exist by running this command (again, substituting your **gcr.io** registry):
```
gcloud container images list-tags gcr.io/matchmaker-dev-201405/openmatch-mmf
```
You should see tags for several of the example MMFs. By default, Open Match will try to use the `openmatch-mmf:py3` image in the examples below, so it is important that the image build was successful and a `py3` image tag exists in your **gcr.io** registry before you continue:
```
DIGEST TAGS TIMESTAMP
5345475e026c php 2018-12-05T00:06:47
e5c274c3509c go 2018-12-05T00:02:17
1b3ec3176d0f py3 2018-12-05T00:02:07
```
## Example of starting a GKE cluster
A cluster with mostly default settings will work for this development guide. In the Cloud SDK command below we start it with machines that have 4 vCPUs. Alternatively, you can use the 'Create Cluster' button in [Google Cloud Console]("https://console.cloud.google.com/kubernetes").
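For reference, the command looks roughly like the following sketch (the cluster name and zone here are placeholders; adjust them and any other flags to your project):
```
gcloud container clusters create --machine-type=n1-standard-4 open-match-dev-cluster --zone=us-west1-a
```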
@ -73,7 +72,7 @@ gcloud compute zones list
## Configuration
Currently, each component reads a local config file `matchmaker_config.json`, and all components assume they have the same configuration (if you would like to help us design the replacement config solution, please join the [discussion](https://github.com/GoogleCloudPlatform/open-match/issues/42)). To this end, there is a single centralized config file located in the `<REPO_ROOT>/config/` which is symlinked to each component's subdirectory for convenience when building locally.
Currently, each component reads a local config file `matchmaker_config.json`, and all components assume they have the same configuration (if you would like to help us design the replacement config solution, please join the [discussion](https://github.com/GoogleCloudPlatform/open-match/issues/42)). To this end, there is a single centralized config file located in the `<REPO_ROOT>/config/` which is symlinked to each component's subdirectory for convenience when building locally. Note: [there is an issue with symlinks on Windows](../issues/57).
## Running Open Match in a development environment
@ -81,24 +80,24 @@ The rest of this guide assumes you have a cluster (example is using GKE, but wor
* Start a copy of redis and a service in front of it:
```
kubectl apply -f redis_deployment.json
kubectl apply -f redis_service.json
kubectl apply -f redis_deployment.yaml
kubectl apply -f redis_service.yaml
```
* Run the **core components**: the frontend API, the backend API, the matchmaker function orchestrator (MMFOrc), and the matchmaking logic API.
**NOTE** In order to kick off jobs, the matchmaker function orchestrator needs a service account with permission to administer the cluster. This should be updated to the minimum required permissions before launch; the current binding is quite permissive but acceptable for closed testing:
```
kubectl apply -f backendapi_deployment.json
kubectl apply -f backendapi_service.json
kubectl apply -f frontendapi_deployment.json
kubectl apply -f frontendapi_service.json
kubectl apply -f mmforc_deployment.json
kubectl apply -f mmforc_serviceaccount.json
kubectl apply -f mmlogic_deployment.json
kubectl apply -f mmlogic_service.json
kubectl apply -f backendapi_deployment.yaml
kubectl apply -f backendapi_service.yaml
kubectl apply -f frontendapi_deployment.yaml
kubectl apply -f frontendapi_service.yaml
kubectl apply -f mmforc_deployment.yaml
kubectl apply -f mmforc_serviceaccount.yaml
kubectl apply -f mmlogicapi_deployment.yaml
kubectl apply -f mmlogicapi_service.yaml
```
* [optional, but recommended] Configure the OpenCensus metrics services:
```
kubectl apply -f metrics_services.json
kubectl apply -f metrics_services.yaml
```
* [optional] On GKE, trying to apply the Kubernetes Prometheus Operator resource definition files without a cluster-admin rolebinding doesn't work; run the following command first. See https://github.com/coreos/prometheus-operator/issues/357
```
@ -107,10 +106,10 @@ The rest of this guide assumes you have a cluster (example is using GKE, but wor
* [optional, uses beta software] If using Prometheus as your metrics gathering backend, configure the [Prometheus Kubernetes Operator](https://github.com/coreos/prometheus-operator):
```
kubectl apply -f prometheus_operator.json
kubectl apply -f prometheus.json
kubectl apply -f prometheus_service.json
kubectl apply -f metrics_servicemonitor.json
kubectl apply -f prometheus_operator.yaml
kubectl apply -f prometheus.yaml
kubectl apply -f prometheus_service.yaml
kubectl apply -f metrics_servicemonitor.yaml
```
You should now be able to see the core component pods running using a `kubectl get pods`, and the core component metrics in the Prometheus Web UI by running `kubectl proxy <PROMETHEUS_POD_NAME> 9090:9090` in your local shell, then opening http://localhost:9090/targets in your browser to see which services Prometheus is collecting from.
@ -135,7 +134,7 @@ service/om-mmforc-metrics ClusterIP 10.59.240.59 <none> 39555/TC
service/om-mmlogicapi ClusterIP 10.59.248.3 <none> 50503/TCP 9m
service/prometheus NodePort 10.59.252.212 <none> 9090:30900/TCP 9m
service/prometheus-operated ClusterIP None <none> 9090/TCP 9m
service/redis-sentinel ClusterIP 10.59.249.197 <none> 6379/TCP 9m
service/redis ClusterIP 10.59.249.197 <none> 6379/TCP 9m
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
deployment.extensions/om-backendapi 1 1 1 1 9m
@ -179,9 +178,9 @@ statefulset.apps/prometheus-prometheus 1 1 9m
In the end: *caveat emptor*. These tools all work and are quite small, and as such are fairly easy for developers to understand by looking at the code and logging output. They are provided as-is just as a reference point of how to begin experimenting with Open Match integrations.
* `examples/frontendclient` is a fake client for the Frontend API. It pretends to be a real game client connecting to Open Match and requests a game, then dumps out the connection string it receives. Note that it doesn't actually test the return path by looking for arbitrary results from your matchmaking function; it pauses and tells you the name of a key to set a connection string in directly using a redis-cli client. **Note**: If you're using the rest of these test programs, you're probably using the Backend Client below. The default profiles it sends to the backend look for far more than one player, so if you want to see meaningful results from running this Frontend Client, you're going to need to generate a bunch of fake players using the client load simulation tool at the same time. Otherwise, expect to wait until it times out as your matchmaker never has enough players to make a successful match.
* `test/cmd/frontendclient/` is a fake client for the Frontend API. It pretends to be a group of real game clients connecting to Open Match. It requests a game, then dumps out the results each player receives to the screen until you press the enter key. **Note**: If you're using the rest of these test programs, you're probably using the Backend Client below. The default profiles that client sends to the backend look for many more than one player, so if you want to see meaningful results from running this Frontend Client, you're going to need to generate a bunch of fake players using the client load simulation tool at the same time. Otherwise, expect to wait until it times out as your matchmaker never has enough players to make a successful match.
* `examples/backendclient` is a fake client for the Backend API. It pretends to be a dedicated game server backend connecting to Open Match and sending in a match profile to fill. Once it receives a match object with a roster, it will also issue a call to assign the player IDs, and gives an example connection string. If it never seems to get a match, make sure you're adding players to the pool using the other two tools. Note: building this image requires that you first build the 'base' dev image (look for `cloudbuild_base.yaml` and `Dockerfile.base` in the root directory) and then update the first step to point to that image in your registry. This will be simplified in a future release. **Note**: If you run this by itself, expect it to wait about 30 seconds, then return a result of 'insufficient players' and exit - this is working as intended. Use the client load simulation tool below to add players to the pool or you'll never be able to make a successful match.
* `test/cmd/client` is a (VERY) basic client load simulation tool. It does **not** test the Frontend API - in fact, it ignores it and writes players directly to state storage on its own. It doesn't do anything but loop endlessly, writing players into state storage so you can test your backend integration, and run your custom MMFs and Evaluators (which are only triggered when there are players in the pool).
* `test/cmd/clientloadgen/` is a (VERY) basic client load simulation tool. It does **not** test the Frontend API - in fact, it ignores it and writes players directly to state storage on its own. It doesn't do anything but loop endlessly, writing players into state storage so you can test your backend integration, and run your custom MMFs and Evaluators (which are only triggered when there are players in the pool).
### Resources

View File

@ -1 +1,33 @@
During alpha, please do not use Open Match as-is in production. To develop against it, please see the [development guide](development.md).
# "Productionizing" a deployment
Here are some steps that should be taken to productionize your Open Match deployment before exposing it to live public traffic. Some of these overlap with best practices for [productionizing Kubernetes](https://cloud.google.com/blog/products/gcp/exploring-container-security-running-a-tight-ship-with-kubernetes-engine-1-10) or cloud infrastructure more generally. Going forward, we will work to fold as many of these into the default deployment strategy for Open Match as possible.
**This is not an exhaustive list and addressing the items in this document alone shouldn't be considered sufficient. Every game is different and will have different production needs.**
## Kubernetes
All the usual guidance around hardening and securing Kubernetes is applicable to running Open Match. [Here is a guide to security for Google Kubernetes Engine on GCP](https://cloud.google.com/blog/products/gcp/exploring-container-security-running-a-tight-ship-with-kubernetes-engine-1-10), and a number of other guides are available from reputable sources on the internet.
### Minimum permissions on Kubernetes
* The components of Open Match should be run in a separate Kubernetes namespace if you're also using the cluster for other services. As of 0.3.0 they run in the 'default' namespace if you follow the development guide.
* Note that the MMFOrc process has cluster management permissions by default. Before moving to production, you should create a role with access only to create Kubernetes Jobs and configure the MMFOrc to use it; see the sketch after this list.
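A minimal sketch of such a setup (the role name here is illustrative; the `batch`/`jobs` resource matches what the MMFOrc actually creates, and the verbs beyond `create` are there so it can inspect and clean up its jobs):
```
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: mmforc-job-runner  # illustrative name
  namespace: default
rules:
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["create", "get", "list", "watch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: mmforc-job-runner
  namespace: default
subjects:
- kind: ServiceAccount
  name: default
  namespace: default
roleRef:
  kind: Role
  name: mmforc-job-runner
  apiGroup: rbac.authorization.k8s.io
```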
### Kubernetes Jobs (MMFOrc)
The 0.3.0 MMFOrc component runs your MMFs as Kubernetes Jobs. You should periodically delete these jobs to keep the cluster running smoothly. How often you need to delete them depends on how many you are running. There are a number of open source solutions to do this for you. ***Note that once you delete a job, you won't have access to that job's logs anymore unless you're sending your logs from Kubernetes to a log aggregator like Google Stackdriver. This can make it a challenge to troubleshoot issues.***
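For example, a manual or cron-driven cleanup pass could be as simple as the following sketch, which deletes every job that has recorded a successful completion (this assumes `kubectl` access to the cluster; `xargs -r` is the GNU flag that skips running on empty input):
```
kubectl get jobs -o jsonpath='{range .items[?(@.status.succeeded==1)]}{.metadata.name}{"\n"}{end}' \
  | xargs -r kubectl delete job
```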
### CPU and Memory limits
For any production Kubernetes deployment, it is good practice to profile your processes and select [resource limits and requests](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) according to your results. For example, you'll likely want to set adequate resource requests based on your expected player base and some load testing for the Redis state storage pods. This will help Kubernetes avoid scheduling other intensive processes on the same underlying node and keep you from running into resource contention issues. Another example might be an MMF with a particularly large memory or CPU footprint - maybe you have one that searches a lot of players for a potential match. This would be a good candidate for resource limits and requests in Kubernetes to both ensure it gets the CPU and RAM it needs to complete quickly, and to make sure it's not scheduled alongside another intensive Kubernetes pod.
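As an illustration, the container section of such an MMF's pod spec might carry both requests and limits like this (the numbers are placeholders, not recommendations; profile your own workload first):
```
resources:
  requests:
    memory: 512Mi
    cpu: 500m
  limits:
    memory: 1Gi
    cpu: "1"
```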
### State storage
The default state storage for Open Match is a _single instance_ of Redis. Although it _is_ possible to go to production with this as the solution if you're willing to accept the potential downsides, for most deployments an HA Redis configuration would better fit your needs. An example YAML file for creating a [self-healing HA Redis deployment on Kubernetes](../install/yaml/01-redis-failover.yaml) is available. Regardless of which configuration you use, it is probably a good idea to put some [resource requests](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) in your Kubernetes resource definition as mentioned above.
You can find more discussion in the [state storage readme doc](../internal/statestorage/redis/README.md).
## Open Match config
Debug logging and the extra debug code paths should be disabled in the `config/matchmaker_config.json` file (as of the time of this writing, 0.3.0).
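Depending on your version, this file may be `matchmaker_config.json` or `matchmaker_config.yaml`; in the YAML form, the relevant setting is simply:
```
debug: false
```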
## Public APIs for Open Match
In many cases, you may choose to configure your game clients to connect to the Open Match Frontend API, and in a few select cases (such as using it for P2P non-dedicated game server hosting), the game client may also need to connect to the Backend API. In these cases, it is important to secure the API endpoints against common attacks, such as DDoS or malformed packet floods.
* Using a cloud provider's Load Balancer in front of the Kubernetes Service is a common approach to enable vendor-specific DDoS protections. Check the documentation for your cloud vendor's Load Balancer for more details ([GCP's DDoS protection](https://cloud.google.com/armor/)).
* An API framework can be used to limit endpoint access to only game clients you have authenticated using your platform's authentication service. This may be accomplished with simple authentication tokens or a more complex scheme depending on your needs.
## Testing
(as of 0.3.0) The provided test programs are just for validating that Open Match is operating correctly; they are command-line applications designed to be run from within the same cluster as Open Match and are therefore not a suitable test harness for doing production testing to make sure your matchmaker is ready to handle your live game. Instead, it is recommended that you integrate Open Match into your game client and test it using the actual game flow players will use if at all possible.
### Load testing
Ideally, you would already be making 'headless' game clients for automated QA and load testing of your game servers; it is recommended that you also code these testing clients to be able to act as a mock player connecting to Open Match. Load testing platform services is a huge topic and should reflect your actual game access patterns as closely as possible, which will be very game dependent.
**Note: It is never a good idea to do load testing against a cloud vendor without informing them first!**

20
docs/roadmap.md Normal file
View File

@ -0,0 +1,20 @@
# Roadmap. [Subject to change]
Releases are scheduled for every 6 weeks. **Every release is a stable, long-term-support version**. Even for alpha releases, best-effort support is available. With a little work and input from an experienced live services developer, you can go to production with any version on the [releases page](https://github.com/GoogleCloudPlatform/open-match/releases).
Our current thinking is to wait to take Open Match out of alpha/beta (and label it 1.0) until it can be used out-of-the-box, standalone, for developers that don't have any existing platform services. Which is to say, the majority of **established game developers likely won't have any reason to wait for the 1.0 release if Open Match already handles your needs**. If you already have live platform services that you plan to integrate Open Match with (player authentication, a group invite system, dedicated game servers, metrics collection, logging aggregation, etc), then a lot of the features planned between 0.4.0 and 1.0 likely aren't of much interest to you anyway.
## Upcoming releases
* **0.4.0** &mdash; Agones Integration & MMF on [Knative](https://cloud.google.com/knative/)
  * MMF instrumentation
  * Match object expiration / lazy deletion
  * API autoscaling by default
  * API changes after this will likely be additions or very minor
* **0.5.0** &mdash; Tracing, Metrics, and KPI Dashboard
* **0.6.0** &mdash; Load testing suite
* **1.0.0** &mdash; API Formally Stable. Breaking API changes will require a new major version number.
* **1.1.0** &mdash; Canonical MMFs
## Philosophy
* The next version (0.4.0) will focus on making MMFs run on serverless platforms - specifically Knative. This will just be first steps, as Knative is still pretty early. We want to get a proof of concept working so we can roadmap out the future "MMF on Knative" experience. Our intention is to keep MMFs as compatible as possible with the current Kubernetes job-based way of doing them. Our hope is that by the time Knative is mature, we'll be able to provide a [Knative build](https://github.com/Knative/build) pipeline that will take existing MMFs and build them as Knative functions. In the meantime, we'll map out a relatively painless (but not yet fully automated) way to make an existing MMF into a Kubernetes Deployment that looks as similar to what [Knative serving](https://github.com/knative/serving) is shaping up to be, in an effort to make the eventual switchover painless. Basically all of this is just _optimizing MMFs to make them spin up faster and take less resources_, **we're not planning to change what MMFs do or the interfaces they need to fulfill**. Existing MMFs will continue to run as-is, and in the future moving them to Knative should be both **optional** and **largely automated**.
* 0.4.0 represents the natural stopping point for adding new functionality until we have more community uptake and direction. We don't anticipate many API changes in 0.4.0 and beyond. Maybe new API calls for new functionality, but we're unlikely to see big shifts in existing calls through 1.0 and its point releases. We'll issue a new major release version if we decide we need those changes.
* The 0.5.0 version and beyond will focus on operationalizing the out-of-the-box experience. Metrics, analytics, a default dashboard, additional tooling, and a load-testing suite are all planned. We want it to be easy for operators to see KPIs and know what's going on with Open Match.

examples/backendclient/Dockerfile Executable file → Normal file

@ -1,8 +1,10 @@
-#FROM golang:1.10.3 as builder
-FROM gcr.io/matchmaker-dev-201405/openmatch-devbase as builder
-WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/examples/backendclient
-COPY ./ ./
-RUN go get -d -v
-RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o backendclient .
-CMD ["./backendclient"]
+FROM open-match-base-build as builder
+WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/examples/backendclient/
+RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
+FROM gcr.io/distroless/static
+COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/examples/backendclient/backendclient .
+COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/examples/backendclient/profiles profiles
+ENTRYPOINT ["/backendclient"]


@ -1,11 +1,10 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
-  args: [ 'pull', 'gcr.io/$PROJECT_ID/openmatch-devbase' ]
+  args: [ 'pull', 'gcr.io/$PROJECT_ID/openmatch-base:dev' ]
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'--tag=gcr.io/$PROJECT_ID/openmatch-backendclient:dev',
-  '--cache-from=gcr.io/$PROJECT_ID/openmatch-devbase:latest',
'.'
]
images: ['gcr.io/$PROJECT_ID/openmatch-backendclient:dev']


@ -25,7 +25,6 @@ import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
@ -35,6 +34,7 @@ import (
backend "github.com/GoogleCloudPlatform/open-match/internal/pb"
"github.com/tidwall/gjson"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
func bytesToString(data []byte) string {
@ -52,10 +52,12 @@ func main() {
// Read the profile
filename := "profiles/testprofile.json"
if len(os.Args) > 1 {
filename = os.Args[1]
}
log.Println("Reading profile from ", filename)
/*
if len(os.Args) > 1 {
filename = os.Args[1]
}
log.Println("Reading profile from ", filename)
*/
jsonFile, err := os.Open(filename)
if err != nil {
panic("Failed to open file specified at command line. Did you forget to specify one?")
@ -116,21 +118,26 @@ func main() {
if err != nil {
log.Fatalf("Attempting to open stream for ListMatches(_) = _, %v", err)
}
log.Printf("Waiting for matches...")
//for i := 0; i < 2; i++ {
for {
log.Printf("Waiting for matches...")
match, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("Error reading stream for ListMatches(_) = _, %v", err)
stat, ok := status.FromError(err)
if ok {
log.Printf("Error reading stream for ListMatches() returned status: %s %s", stat.Code().String(), stat.Message())
} else {
log.Printf("Error reading stream for ListMatches() returned status: %s", err)
}
break
}
if match.Properties == "{error: insufficient_players}" {
log.Println("Waiting for a larger player pool...")
break
//break
}
// Validate JSON before trying to parse it
@ -139,36 +146,29 @@ func main() {
}
log.Println("Received match:")
ppJSON(match.Properties)
fmt.Println(match)
//fmt.Println(match) // Debug
/*
// Get players from the json properties.roster field
log.Println("Gathering roster from received match...")
players := make([]string, 0)
result := gjson.Get(match.Properties, "properties.roster")
result.ForEach(func(teamName, teamRoster gjson.Result) bool {
teamRoster.ForEach(func(_, player gjson.Result) bool {
players = append(players, player.String())
return true // keep iterating
})
return true // keep iterating
})
//log.Printf("players = %+v\n", players)
// Assign players in this match to our server
connstring := "example.com:12345"
if len(os.Args) >= 2 {
connstring = os.Args[1]
log.Printf("Player assignment '%v' specified at commandline", connstring)
}
log.Println("Assigning players to DGS at", connstring)
// Assign players in this match to our server
log.Println("Assigning players to DGS at example.com:12345")
playerstr := strings.Join(players, " ")
roster := &backend.Roster{PlayerIds: playerstr}
ci := &backend.ConnectionInfo{ConnectionString: "example.com:12345"}
assign := &backend.Assignments{Roster: roster, ConnectionInfo: ci}
_, err = client.CreateAssignments(context.Background(), assign)
if err != nil {
panic(err)
assign := &backend.Assignments{Rosters: match.Rosters, Assignment: connstring}
log.Printf("Waiting for matches...")
_, err = client.CreateAssignments(context.Background(), assign)
if err != nil {
stat, ok := status.FromError(err)
if ok {
log.Printf("Error reading stream for ListMatches() returned status: %s %s", stat.Code().String(), stat.Message())
} else {
log.Printf("Error reading stream for ListMatches() returned status: %s", err)
}
*/
break
}
log.Println("Success! Not deleting assignments [demo mode].")
}
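The change above replaces `log.Fatalf` on stream errors with inspection of the gRPC status carried by the error, so the client logs the code and message and exits the read loop instead of crashing. Below is a self-contained sketch of that pattern; `handleRecvErr` is an illustrative helper, not code from this repo:

```go
package main

import (
	"io"
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// handleRecvErr converts a stream Recv() error into a gRPC status so the
// code and message can be logged (or branched on) rather than fatally
// exiting. It reports whether the read loop should stop.
func handleRecvErr(err error) (done bool) {
	if err == nil {
		return false // keep reading
	}
	if err == io.EOF {
		return true // server closed the stream normally
	}
	if stat, ok := status.FromError(err); ok {
		if stat.Code() == codes.Unavailable {
			log.Printf("server unavailable (retryable): %s", stat.Message())
		} else {
			log.Printf("stream error: code=%s msg=%q", stat.Code(), stat.Message())
		}
	} else {
		log.Printf("stream error (no gRPC status): %v", err)
	}
	return true
}

func main() {
	// Hypothetical usage inside a Recv() loop:
	// for { match, err := stream.Recv(); if handleRecvErr(err) { break }; ... }
	log.Println(handleRecvErr(io.EOF)) // prints "true"
}
```

Branching on the status code lets a client treat `codes.Unavailable` as retryable while still surfacing programmer errors such as `codes.InvalidArgument`.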


@ -1,5 +1,5 @@
{
"imagename":"gcr.io/matchmaker-dev-201405/openmatch-mmf:py3",
"imagename":"gcr.io/open-match-public-images/openmatch-mmf-py3-mmlogic-simple:dev",
"name":"testprofilev1",
"id":"testprofile",
"properties":{


@ -0,0 +1,9 @@
FROM open-match-base-build as builder
WORKDIR /go/src/github.com/GoogleCloudPlatform/open-match/examples/evaluators/golang/simple/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo .
FROM gcr.io/distroless/static
COPY --from=builder /go/src/github.com/GoogleCloudPlatform/open-match/examples/evaluators/golang/simple/simple .
ENTRYPOINT ["/simple"]


@ -1,9 +1,10 @@
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'pull', 'gcr.io/$PROJECT_ID/openmatch-base:dev' ]
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'--tag=gcr.io/$PROJECT_ID/openmatch-evaluator:dev',
'-f', 'Dockerfile.evaluator',
'.'
]
images: ['gcr.io/$PROJECT_ID/openmatch-evaluator:dev']


@ -32,7 +32,9 @@ import (
"strings"
"time"
"github.com/GoogleCloudPlatform/open-match/config"
om_messages "github.com/GoogleCloudPlatform/open-match/internal/pb"
redishelpers "github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis"
"github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis/redispb"
"github.com/gobs/pretty"
"github.com/gomodule/redigo/redis"
@ -47,30 +49,18 @@ func main() {
// Read config
lgr.Println("Initializing config...")
cfg, err := readConfig("matchmaker_config", map[string]interface{}{
"REDIS_SENTINEL_SERVICE_HOST": "redis-sentinel",
"REDIS_SENTINEL_SERVICE_PORT": "6379",
"auth": map[string]string{
// Read from k8s secret eventually
// Probably doesn't need a map, just here for reference
"password": "12fa",
},
})
cfg, err := config.Read()
if err != nil {
panic(nil)
}
// Connect to redis
// As per https://www.iana.org/assignments/uri-schemes/prov/redis
// redis://user:secret@localhost:6379/0?foo=bar&qux=baz
// redis pool docs: https://godoc.org/github.com/gomodule/redigo/redis#Pool
redisURL := "redis://" + cfg.GetString("REDIS_SENTINEL_SERVICE_HOST") + ":" + cfg.GetString("REDIS_SENTINEL_SERVICE_PORT")
lgr.Println("Connecting to redis at", redisURL)
pool := redis.Pool{
MaxIdle: 3,
MaxActive: 0,
IdleTimeout: 60 * time.Second,
Dial: func() (redis.Conn, error) { return redis.DialURL(redisURL) },
pool, err := redishelpers.ConnectionPool(cfg)
if err != nil {
lgr.Fatal(err)
}
defer pool.Close()
redisConn := pool.Get()
defer redisConn.Close()
@ -82,7 +72,7 @@ func main() {
start := time.Now()
proposedMatchIds, overloadedPlayers, overloadedMatches, approvedMatches, err := stub(cfg, &pool)
proposedMatchIds, overloadedPlayers, overloadedMatches, approvedMatches, err := stub(cfg, pool)
overloadedPlayerList, overloadedMatchList, approvedMatchList := generateLists(overloadedPlayers, overloadedMatches, approvedMatches)
fmt.Println("overloadedPlayers")
@ -151,36 +141,6 @@ func chooseMatches(overloaded []int) ([]int, []int, error) {
return []int{}, overloaded, nil
}
func readConfig(filename string, defaults map[string]interface{}) (*viper.Viper, error) {
/*
Examples of redis-related env vars as written by k8s
REDIS_SENTINEL_PORT_6379_TCP=tcp://10.55.253.195:6379
REDIS_SENTINEL_PORT=tcp://10.55.253.195:6379
REDIS_SENTINEL_PORT_6379_TCP_ADDR=10.55.253.195
REDIS_SENTINEL_SERVICE_PORT=6379
REDIS_SENTINEL_PORT_6379_TCP_PORT=6379
REDIS_SENTINEL_PORT_6379_TCP_PROTO=tcp
REDIS_SENTINEL_SERVICE_HOST=10.55.253.195
*/
v := viper.New()
for key, value := range defaults {
v.SetDefault(key, value)
}
v.SetConfigName(filename)
v.SetConfigType("json")
v.AddConfigPath(".")
v.AutomaticEnv()
// Optional read from config if it exists
err := v.ReadInConfig()
if err != nil {
//lgr.Printf("error when reading config: %v\n", err)
//lgr.Println("continuing...")
err = nil
}
return v, err
}
func stub(cfg *viper.Viper, pool *redis.Pool) ([]string, map[string][]int, map[int][]int, map[int]bool, error) {
//Init Logger
lgr := log.New(os.Stdout, "MMFEvalStub: ", log.LstdFlags)
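The refactor above replaces the per-binary viper setup and hand-built redigo pool with the shared `config` and `redishelpers` packages. Assuming only the signatures visible in this diff (`config.Read()` returning a config and an error, and `redishelpers.ConnectionPool(cfg)` returning a pool and an error), startup reduces to a sketch like this:

```go
package main

import (
	"log"

	"github.com/GoogleCloudPlatform/open-match/config"
	redishelpers "github.com/GoogleCloudPlatform/open-match/internal/statestorage/redis"
)

func main() {
	// Shared config loading replaces the removed readConfig()/viper defaults;
	// Redis host and port now come from the loaded configuration.
	cfg, err := config.Read()
	if err != nil {
		log.Fatalf("failed to read config: %v", err)
	}

	// Shared helper replaces the manually constructed redigo pool.
	pool, err := redishelpers.ConnectionPool(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	redisConn := pool.Get()
	defer redisConn.Close()
	// ... evaluator logic using redisConn ...
}
```

This also removes the hard-coded default password (`"12fa"`) visible in the deleted lines above, so credentials no longer live in source.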


@ -1 +0,0 @@
../../../../config/matchmaker_config.json


@ -0,0 +1 @@
../../../../config/matchmaker_config.yaml

Some files were not shown because too many files have changed in this diff.