Mirror of https://github.com/googleforgames/open-match.git (synced 2025-03-25 13:24:18 +00:00)

Compare commits: v0.9.0 ... v1.0.0-rc.

64 commits:
5f8febb517, 93df53201c, eb86841423, 771f706317, a9f9a2f2e6, 068632285e, 113461114e, 0ac7ae13ac,
29a2dbcf99, 48d3b5c0ee, a5fa651106, cd84d74ff9, 8c2aa1ea81, 493ff8e520, 8363bc5fc9, 144f646b7f,
b518b5cc1b, af0b9fd5f7, 5f4b522ecd, 12625d7f53, 3248c8c4ad, 10c0c59997, c17e3e62c0, 8e91be6201,
f6c837d6cd, 3c8908aae0, 0689d92d9c, 3c9a8f5568, 30204a2d15, a5b6c0c566, 4a00baf847, d74262f3ba,
2262652ea9, e15fd47535, 670f38d36e, f0a85633a5, 6cb47ce191, 529c01330e, b36a348db7, 5e277265ad,
4420d7add2, 3de052279b, 7a4aa3589f, bca6f487cc, d0c373a850, deb2947ae2, d889278151, 1b63fa53dc,
af02e4818f, cda2d3185f, 2317977602, 9ef83ed344, 33bd633b1d, 1af8cf1e79, 0ef46fc4d4, 79daf50531,
a9c327b430, 2c637c97b8, 668b10030b, 1c7fd24a34, be0cebd457, fe7bb4da8f, e80de171a0, fdd707347e
.golangci.yaml  Dockerfile.base-build  Dockerfile.ci  Makefile  README.md  go.mod  go.sum
api
backend.proto  backend.swagger.json  evaluator.swagger.json  frontend.proto  frontend.swagger.json  matchfunction.swagger.json  messages.proto  query.proto  query.swagger.json
cloudbuild.yaml
cmd
backend
default-evaluator
frontend
minimatch
query
scale-backend
scale-frontend
swaggerui
synchronizer
docs
examples
demo/components
scale
README.md
backend
frontend
scenarios
install
02-open-match-demo.yaml
helm/open-match
Chart.yaml
subcharts
open-match-customize
open-match-scale
open-match-telemetry/dashboards
templates
values-production.yaml  values.yaml
internal
app
appmain
config
filter
omerror
rpc
clientcache.go  clientcache_test.go  clients.go  clients_test.go  insecure.go  insecure_test.go  listener_holder.go  listener_holder_test.go  server.go  server_test.go
testing
tls_server.go  tls_server_test.go
set
signal
statestore
telemetry
configz.go  configz_test.go  help.go  help_test.go  jaeger.go  metrics.go  metrics_test.go  opencensus_agent.go  probe.go  probe_test.go  prometheus.go  public.go  stackdriver.go  zpages.go
testing
e2e
cluster.go  cluster_test.go  common.go  consts.go  fetch_matches_test.go  in_memory.go  main_test.go  query_tickets_test.go  ticket_test.go
evaluator
fake_frontend.go
mmf
util
pkg
matchfunction
pb
test
e2e
evaluator
matchfunction
third_party
google
protoc-gen-swagger/options
swaggerui
tools
tutorials
custom_evaluator
default_evaluator
director
frontend
matchfunction
solution
matchmaker101
director
frontend
matchfunction
solution
matchmaker102
@@ -171,17 +171,10 @@ linters:
  - funlen
  - gochecknoglobals
  - goconst
  - gocritic
  - gocyclo
  - gofmt
  - goimports
  - gosec
  - interfacer # deprecated - "A tool that suggests interfaces is prone to bad suggestions"
  - lll
  - prealloc
  - scopelint
  - staticcheck
  - stylecheck

#linters:
# enable-all: true
@@ -13,7 +13,7 @@
# limitations under the License.

# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
FROM golang:1.13.4
FROM golang:1.14.0
ENV GO111MODULE=on

WORKDIR /go/src/open-match.dev/open-match
@@ -34,13 +34,13 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-stretch" && \
    apt-get update -y && apt-get install google-cloud-sdk google-cloud-sdk-app-engine-go -y -qq

# Install Golang
# https://github.com/docker-library/golang/blob/master/1.13/stretch/Dockerfile
# https://github.com/docker-library/golang/blob/master/1.14/stretch/Dockerfile
RUN mkdir -p /toolchain/golang
WORKDIR /toolchain/golang
RUN sudo rm -rf /usr/local/go/

# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
RUN curl -L https://golang.org/dl/go1.13.4.linux-amd64.tar.gz | sudo tar -C /usr/local -xz
RUN curl -L https://golang.org/dl/go1.14.linux-amd64.tar.gz | sudo tar -C /usr/local -xz

ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
Makefile (150)

@@ -52,7 +52,7 @@
# If you want information on how to edit this file checkout,
# http://makefiletutorial.com/

BASE_VERSION = 0.0.0-dev
BASE_VERSION = 1.0.0-rc.1
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
@@ -67,6 +67,8 @@ MINIKUBE_VERSION = latest
GOLANGCI_VERSION = 1.18.0
KIND_VERSION = 0.5.1
SWAGGERUI_VERSION = 3.24.2
GOOGLE_APIS_VERSION = aba342359b6743353195ca53f944fe71e6fb6cd4
GRPC_GATEWAY_VERSION = 1.14.3
TERRAFORM_VERSION = 0.12.13
CHART_TESTING_VERSION = 2.4.0

@@ -77,7 +79,6 @@ ENABLE_SECURITY_HARDENING = 0
GO = GO111MODULE=on go
# Defines the absolute local directory of the open-match project
REPOSITORY_ROOT := $(patsubst %/,%,$(dir $(abspath $(MAKEFILE_LIST))))
GO_BUILD_COMMAND = CGO_ENABLED=0 $(GO) build -a -installsuffix cgo .
BUILD_DIR = $(REPOSITORY_ROOT)/build
TOOLCHAIN_DIR = $(BUILD_DIR)/toolchain
TOOLCHAIN_BIN = $(TOOLCHAIN_DIR)/bin
@@ -196,7 +197,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
CMDS = $(notdir $(wildcard cmd/*))

# Names of the individual images, ommiting the openmatch prefix.
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-pool evaluator-go-simple base-build
IMAGES = $(CMDS) mmf-go-soloduel base-build

help:
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
@ -236,12 +237,6 @@ $(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-buil
|
||||
build-mmf-go-soloduel-image: docker build-base-build-image
|
||||
docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
|
||||
|
||||
build-mmf-go-pool-image: docker build-base-build-image
|
||||
docker build -f test/matchfunction/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-pool:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-pool:$(ALTERNATE_TAG) .
|
||||
|
||||
build-evaluator-go-simple-image: docker build-base-build-image
|
||||
docker build -f test/evaluator/Dockerfile -t $(REGISTRY)/openmatch-evaluator-go-simple:$(TAG) -t $(REGISTRY)/openmatch-evaluator-go-simple:$(ALTERNATE_TAG) .
|
||||
|
||||
#######################################
|
||||
## push-images / push-<image name>-image: builds and pushes images to your
|
||||
## container registry.
|
||||
@ -364,12 +359,16 @@ install-scale-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EX
|
||||
# install-ci-chart will install open-match-core with pool based mmf for end-to-end in-cluster test.
|
||||
install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTENSION) install/helm/open-match/secrets/
|
||||
$(HELM) upgrade $(OPEN_MATCH_HELM_NAME) $(HELM_UPGRADE_FLAGS) --atomic install/helm/open-match $(HELM_IMAGE_FLAGS) \
|
||||
--set open-match-core.ignoreListTTL=500ms \
|
||||
--set open-match-customize.enabled=true \
|
||||
--set open-match-customize.function.enabled=true \
|
||||
--set open-match-customize.evaluator.enabled=true \
|
||||
--set open-match-customize.function.image=openmatch-mmf-go-pool \
|
||||
--set query.replicas=1,frontend.replicas=1,backend.replicas=1,open-match-customize.evaluator.replicas=1,open-match-customize.function.replicas=1 \
|
||||
--set query.replicas=1,frontend.replicas=1,backend.replicas=1 \
|
||||
--set evaluator.hostName=test \
|
||||
--set evaluator.grpcPort=50509 \
|
||||
--set evaluator.httpPort=51509 \
|
||||
--set open-match-core.registrationInterval=200ms \
|
||||
--set open-match-core.proposalCollectionInterval=200ms \
|
||||
--set open-match-core.assignedDeleteTimeout=200ms \
|
||||
--set open-match-core.pendingReleaseTimeout=200ms \
|
||||
--set open-match-core.queryPageSize=10 \
|
||||
--set global.gcpProjectId=intentionally-invalid-value \
|
||||
--set redis.master.resources.requests.cpu=0.6,redis.master.resources.requests.memory=300Mi \
|
||||
--set ci=true
|
||||
|
||||
@ -381,6 +380,10 @@ delete-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/kubec
|
||||
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)
|
||||
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)-demo
|
||||
|
||||
ifneq ($(BASE_VERSION), 0.0.0-dev)
|
||||
install/yaml/: REGISTRY = gcr.io/$(OPEN_MATCH_PUBLIC_IMAGES_PROJECT_ID)
|
||||
install/yaml/: TAG = $(BASE_VERSION)
|
||||
endif
|
||||
install/yaml/: update-chart-deps install/yaml/install.yaml install/yaml/01-open-match-core.yaml install/yaml/02-open-match-demo.yaml install/yaml/03-prometheus-chart.yaml install/yaml/04-grafana-chart.yaml install/yaml/05-jaeger-chart.yaml install/yaml/06-open-match-override-configmap.yaml install/yaml/07-open-match-default-evaluator.yaml
|
||||
|
||||
install/yaml/01-open-match-core.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
|
||||
@ -398,6 +401,7 @@ install/yaml/03-prometheus-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
|
||||
mkdir -p install/yaml/
|
||||
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
|
||||
--set open-match-core.enabled=false \
|
||||
--set open-match-core.redis.enabled=false \
|
||||
--set open-match-telemetry.enabled=true \
|
||||
--set global.telemetry.prometheus.enabled=true \
|
||||
install/helm/open-match > install/yaml/03-prometheus-chart.yaml
|
||||
@ -406,6 +410,7 @@ install/yaml/04-grafana-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
|
||||
mkdir -p install/yaml/
|
||||
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
|
||||
--set open-match-core.enabled=false \
|
||||
--set open-match-core.redis.enabled=false \
|
||||
--set open-match-telemetry.enabled=true \
|
||||
--set global.telemetry.grafana.enabled=true \
|
||||
install/helm/open-match > install/yaml/04-grafana-chart.yaml
|
||||
@ -414,6 +419,7 @@ install/yaml/05-jaeger-chart.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
|
||||
mkdir -p install/yaml/
|
||||
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
|
||||
--set open-match-core.enabled=false \
|
||||
--set open-match-core.redis.enabled=false \
|
||||
--set open-match-telemetry.enabled=true \
|
||||
--set global.telemetry.jaeger.enabled=true \
|
||||
install/helm/open-match > install/yaml/05-jaeger-chart.yaml
|
||||
@ -422,6 +428,7 @@ install/yaml/06-open-match-override-configmap.yaml: build/toolchain/bin/helm$(EX
|
||||
mkdir -p install/yaml/
|
||||
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
|
||||
--set open-match-core.enabled=false \
|
||||
--set open-match-core.redis.enabled=false \
|
||||
--set open-match-override.enabled=true \
|
||||
-s templates/om-configmap-override.yaml \
|
||||
install/helm/open-match > install/yaml/06-open-match-override-configmap.yaml
|
||||
@ -430,6 +437,7 @@ install/yaml/07-open-match-default-evaluator.yaml: build/toolchain/bin/helm$(EXE
|
||||
mkdir -p install/yaml/
|
||||
$(HELM) template $(OPEN_MATCH_HELM_NAME) $(HELM_TEMPLATE_FLAGS) $(HELM_IMAGE_FLAGS) \
|
||||
--set open-match-core.enabled=false \
|
||||
--set open-match-core.redis.enabled=false \
|
||||
--set open-match-customize.enabled=true \
|
||||
--set open-match-customize.evaluator.enabled=true \
|
||||
install/helm/open-match > install/yaml/07-open-match-default-evaluator.yaml
|
||||
@ -533,13 +541,13 @@ build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION):
|
||||
mkdir -p $(TOOLCHAIN_BIN)
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
|
||||
|
||||
build/toolchain/bin/certgen$(EXE_EXTENSION): tools/certgen/certgen$(EXE_EXTENSION)
|
||||
build/toolchain/bin/certgen$(EXE_EXTENSION):
|
||||
mkdir -p $(TOOLCHAIN_BIN)
|
||||
cp -f $(REPOSITORY_ROOT)/tools/certgen/certgen$(EXE_EXTENSION) $(CERTGEN)
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build $(REPOSITORY_ROOT)/tools/certgen/
|
||||
|
||||
build/toolchain/bin/reaper$(EXE_EXTENSION): tools/reaper/reaper$(EXE_EXTENSION)
|
||||
build/toolchain/bin/reaper$(EXE_EXTENSION):
|
||||
mkdir -p $(TOOLCHAIN_BIN)
|
||||
cp -f $(REPOSITORY_ROOT)/tools/reaper/reaper$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/reaper$(EXE_EXTENSION)
|
||||
cd $(TOOLCHAIN_BIN) && $(GO) build $(REPOSITORY_ROOT)/tools/reaper/
|
||||
|
||||
# Fake target for docker
|
||||
docker: no-sudo
|
||||
@ -589,7 +597,7 @@ get-kind-kubeconfig: build/toolchain/bin/kind$(EXE_EXTENSION)
|
||||
delete-kind-cluster: build/toolchain/bin/kind$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
|
||||
-$(KIND) delete cluster
|
||||
|
||||
create-gke-cluster: GKE_VERSION = 1.14.8-gke.17 # gcloud beta container get-server-config --zone us-west1-a
|
||||
create-gke-cluster: GKE_VERSION = 1.14.10-gke.32 # gcloud beta container get-server-config --zone us-west1-a
|
||||
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-4 --enable-autoscaling --min-nodes 1 --num-nodes 2 --max-nodes 10 --disk-size 50
|
||||
create-gke-cluster: GKE_FUTURE_COMPAT_FLAGS = --no-enable-basic-auth --no-issue-client-certificate --enable-ip-alias --metadata disable-legacy-endpoints=true --enable-autoupgrade
|
||||
create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
|
||||
@ -671,9 +679,28 @@ build: assets
|
||||
$(GO) build ./...
|
||||
$(GO) build -tags e2ecluster ./...
|
||||
|
||||
define test_folder
|
||||
$(if $(wildcard $(1)/go.mod), \
|
||||
cd $(1) && \
|
||||
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -race ./... && \
|
||||
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -run IgnoreRace$$ ./... \
|
||||
)
|
||||
$(foreach dir, $(wildcard $(1)/*/.), $(call test_folder, $(dir)))
|
||||
endef
|
||||
|
||||
define fast_test_folder
|
||||
$(if $(wildcard $(1)/go.mod), \
|
||||
cd $(1) && \
|
||||
$(GO) test ./... \
|
||||
)
|
||||
$(foreach dir, $(wildcard $(1)/*/.), $(call fast_test_folder, $(dir)))
|
||||
endef
|
||||
|
||||
test: $(ALL_PROTOS) tls-certs third_party/
|
||||
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -race ./...
|
||||
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -run IgnoreRace$$ ./...
|
||||
$(call test_folder,.)
|
||||
|
||||
fasttest: $(ALL_PROTOS) tls-certs third_party/
|
||||
$(call fast_test_folder,.)
|
||||
|
||||
test-e2e-cluster: all-protos tls-certs third_party/
|
||||
$(HELM) test --timeout 7m30s -v 0 --logs -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(OPEN_MATCH_HELM_NAME)
|
||||
@ -718,58 +745,6 @@ build/cmd/demo-%/COPY_PHONY:
|
||||
mkdir -p $(BUILD_DIR)/cmd/demo-$*/
|
||||
cp -r examples/demo/static $(BUILD_DIR)/cmd/demo-$*/static
|
||||
|
||||
all: service-binaries example-binaries tools-binaries
|
||||
|
||||
service-binaries: cmd/minimatch/minimatch$(EXE_EXTENSION) cmd/swaggerui/swaggerui$(EXE_EXTENSION)
|
||||
service-binaries: cmd/backend/backend$(EXE_EXTENSION) cmd/frontend/frontend$(EXE_EXTENSION)
|
||||
service-binaries: cmd/query/query$(EXE_EXTENSION) cmd/synchronizer/synchronizer$(EXE_EXTENSION)
|
||||
|
||||
example-binaries: example-mmf-binaries example-evaluator-binaries
|
||||
example-mmf-binaries: examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION)
|
||||
example-evaluator-binaries: test/evaluator/evaluator$(EXE_EXTENSION)
|
||||
|
||||
examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
|
||||
cd $(REPOSITORY_ROOT)/examples/functions/golang/soloduel; $(GO_BUILD_COMMAND)
|
||||
|
||||
test/matchfunction/matchfunction$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
|
||||
cd $(REPOSITORY_ROOT)/test/matchfunction; $(GO_BUILD_COMMAND)
|
||||
|
||||
test/evaluator/evaluator$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
|
||||
cd $(REPOSITORY_ROOT)/test/evaluator; $(GO_BUILD_COMMAND)
|
||||
|
||||
tools-binaries: tools/certgen/certgen$(EXE_EXTENSION) tools/reaper/reaper$(EXE_EXTENSION)
|
||||
|
||||
cmd/backend/backend$(EXE_EXTENSION): pkg/pb/backend.pb.go pkg/pb/backend.pb.gw.go api/backend.swagger.json
|
||||
cd $(REPOSITORY_ROOT)/cmd/backend; $(GO_BUILD_COMMAND)
|
||||
|
||||
cmd/frontend/frontend$(EXE_EXTENSION): pkg/pb/frontend.pb.go pkg/pb/frontend.pb.gw.go api/frontend.swagger.json
|
||||
cd $(REPOSITORY_ROOT)/cmd/frontend; $(GO_BUILD_COMMAND)
|
||||
|
||||
cmd/query/query$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json
|
||||
cd $(REPOSITORY_ROOT)/cmd/query; $(GO_BUILD_COMMAND)
|
||||
|
||||
cmd/synchronizer/synchronizer$(EXE_EXTENSION): internal/ipb/synchronizer.pb.go
|
||||
cd $(REPOSITORY_ROOT)/cmd/synchronizer; $(GO_BUILD_COMMAND)
|
||||
|
||||
# Note: This list of dependencies is long but only add file references here. If you add a .PHONY dependency make will always rebuild it.
|
||||
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/backend.pb.go pkg/pb/backend.pb.gw.go api/backend.swagger.json
|
||||
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/frontend.pb.go pkg/pb/frontend.pb.gw.go api/frontend.swagger.json
|
||||
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json
|
||||
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
|
||||
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
|
||||
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/messages.pb.go
|
||||
cmd/minimatch/minimatch$(EXE_EXTENSION): internal/ipb/synchronizer.pb.go
|
||||
cd $(REPOSITORY_ROOT)/cmd/minimatch; $(GO_BUILD_COMMAND)
|
||||
|
||||
cmd/swaggerui/swaggerui$(EXE_EXTENSION): third_party/swaggerui/
|
||||
cd $(REPOSITORY_ROOT)/cmd/swaggerui; $(GO_BUILD_COMMAND)
|
||||
|
||||
tools/certgen/certgen$(EXE_EXTENSION):
|
||||
cd $(REPOSITORY_ROOT)/tools/certgen/ && $(GO_BUILD_COMMAND)
|
||||
|
||||
tools/reaper/reaper$(EXE_EXTENSION):
|
||||
cd $(REPOSITORY_ROOT)/tools/reaper/ && $(GO_BUILD_COMMAND)
|
||||
|
||||
build/policies/binauthz.yaml: install/policies/binauthz.yaml
|
||||
mkdir -p $(BUILD_DIR)/policies
|
||||
cp -f $(REPOSITORY_ROOT)/install/policies/binauthz.yaml $(BUILD_DIR)/policies/binauthz.yaml
|
||||
@ -826,7 +801,7 @@ ci-reap-namespaces: build/toolchain/bin/reaper$(EXE_EXTENSION)
|
||||
|
||||
# For presubmit we want to update the protobuf generated files and verify that tests are good.
|
||||
presubmit: GOLANG_TEST_COUNT = 5
|
||||
presubmit: clean third_party/ update-chart-deps assets update-deps lint build install-toolchain test md-test terraform-test
|
||||
presubmit: clean third_party/ update-chart-deps assets update-deps lint build test md-test terraform-test
|
||||
|
||||
build/release/: presubmit clean-install-yaml install/yaml/
|
||||
mkdir -p $(BUILD_DIR)/release/
|
||||
@ -860,19 +835,6 @@ clean-protos:
|
||||
rm -rf $(REPOSITORY_ROOT)/pkg/pb/
|
||||
rm -rf $(REPOSITORY_ROOT)/internal/ipb/
|
||||
|
||||
clean-binaries:
|
||||
rm -rf $(REPOSITORY_ROOT)/cmd/backend/backend$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/cmd/synchronizer/synchronizer$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/cmd/frontend/frontend$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/cmd/query/query$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/cmd/minimatch/minimatch$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/test/matchfunction/matchfunction$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/test/evaluator/evaluator$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/cmd/swaggerui/swaggerui$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/tools/certgen/certgen$(EXE_EXTENSION)
|
||||
rm -rf $(REPOSITORY_ROOT)/tools/reaper/reaper$(EXE_EXTENSION)
|
||||
|
||||
clean-terraform:
|
||||
rm -rf $(REPOSITORY_ROOT)/install/terraform/.terraform/
|
||||
|
||||
@ -897,7 +859,7 @@ clean-swagger-docs:
|
||||
clean-third-party:
|
||||
rm -rf $(REPOSITORY_ROOT)/third_party/
|
||||
|
||||
clean: clean-images clean-binaries clean-build clean-install-yaml clean-secrets clean-terraform clean-third-party clean-protos clean-swagger-docs
|
||||
clean: clean-images clean-build clean-install-yaml clean-secrets clean-terraform clean-third-party clean-protos clean-swagger-docs
|
||||
|
||||
proxy-frontend: build/toolchain/bin/kubectl$(EXE_EXTENSION)
|
||||
@echo "Frontend Health: http://localhost:$(FRONTEND_PORT)/healthz"
|
||||
@ -964,18 +926,18 @@ third_party/google/api:
|
||||
mkdir -p $(TOOLCHAIN_DIR)/googleapis-temp/
|
||||
mkdir -p $(REPOSITORY_ROOT)/third_party/google/api
|
||||
mkdir -p $(REPOSITORY_ROOT)/third_party/google/rpc
|
||||
curl -o $(TOOLCHAIN_DIR)/googleapis-temp/googleapis.zip -L https://github.com/googleapis/googleapis/archive/master.zip
|
||||
curl -o $(TOOLCHAIN_DIR)/googleapis-temp/googleapis.zip -L https://github.com/googleapis/googleapis/archive/$(GOOGLE_APIS_VERSION).zip
|
||||
(cd $(TOOLCHAIN_DIR)/googleapis-temp/; unzip -q -o googleapis.zip)
|
||||
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-master/google/api/*.proto $(REPOSITORY_ROOT)/third_party/google/api/
|
||||
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-master/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
|
||||
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/api/*.proto $(REPOSITORY_ROOT)/third_party/google/api/
|
||||
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
|
||||
rm -rf $(TOOLCHAIN_DIR)/googleapis-temp
|
||||
|
||||
third_party/protoc-gen-swagger/options:
|
||||
mkdir -p $(TOOLCHAIN_DIR)/grpc-gateway-temp/
|
||||
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options
|
||||
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/master.zip
|
||||
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/v$(GRPC_GATEWAY_VERSION).zip
|
||||
(cd $(TOOLCHAIN_DIR)/grpc-gateway-temp/; unzip -q -o grpc-gateway.zip)
|
||||
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-master/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
|
||||
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
|
||||
rm -rf $(TOOLCHAIN_DIR)/grpc-gateway-temp
|
||||
|
||||
third_party/swaggerui/:
|
||||
|
@@ -24,10 +24,6 @@ The [Open Match Development guide](docs/development.md) has detailed instructions
on getting the source code, making changes, testing and submitting a pull request
to Open Match.

## Disclaimer

This software is currently alpha, and subject to change.

## Support

* [Slack Channel](https://open-match.slack.com/) ([Signup](https://join.slack.com/t/open-match/shared_invite/enQtNDM1NjcxNTY4MTgzLTM5ZWQxNjc1YWI3MzJmN2RiMWJmYWI0ZjFiNzNkZmNkMWQ3YWU5OGVkNzA5Yzc4OGVkOGU5MTc0OTA5ZTA5NDU))
@@ -88,7 +88,12 @@ message ReleaseTicketsRequest{

message ReleaseTicketsResponse {}

message AssignTicketsRequest {
message ReleaseAllTicketsRequest{}

message ReleaseAllTicketsResponse {}

// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
message AssignmentGroup{
  // TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
  repeated string ticket_ids = 1;

@@ -96,13 +101,34 @@ message AssignTicketsRequest {
  Assignment assignment = 2;
}

message AssignTicketsResponse {}
// AssignmentFailure contains the id of the Ticket that failed the Assignment and the failure status.
message AssignmentFailure {
  enum Cause {
    UNKNOWN = 0;
    TICKET_NOT_FOUND = 1;
  }

  string ticket_id = 1;
  Cause cause = 2;
}

message AssignTicketsRequest {
  // Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied.
  repeated AssignmentGroup assignments = 1;
}

message AssignTicketsResponse {
  // Failures is a list of all the Tickets that failed assignment along with the cause of failure.
  repeated AssignmentFailure failures = 1;
}
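The AssignTickets messages above replace the old flat ticket_ids/assignment pair with a list of AssignmentGroups, and the response now reports per-ticket AssignmentFailures instead of being empty. A minimal Go sketch of the new call shape, assuming the generated bindings in open-match.dev/open-match/pkg/pb mirror these messages; the connection string, ticket ids, and the assignTickets helper are illustrative placeholders, not part of this diff:

```go
package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

// assignTickets shows the v1.0 AssignTickets shape: one AssignmentGroup per
// Assignment, with per-ticket failures reported in the response.
func assignTickets(ctx context.Context, conn *grpc.ClientConn) {
	be := pb.NewBackendServiceClient(conn)

	resp, err := be.AssignTickets(ctx, &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{
			{
				TicketIds:  []string{"ticket-1", "ticket-2"}, // placeholder ids
				Assignment: &pb.Assignment{Connection: "10.0.0.1:7777"},
			},
		},
	})
	if err != nil {
		log.Fatalf("AssignTickets: %v", err)
	}
	for _, f := range resp.Failures {
		// Cause is the AssignmentFailure enum above, e.g. TICKET_NOT_FOUND.
		log.Printf("ticket %s not assigned: %v", f.TicketId, f.Cause)
	}
}
```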
// The BackendService implements APIs to generate matches and handle ticket assignments.
service BackendService {
  // FetchMatches triggers a MatchFunction with the specified MatchProfile and returns a set of match proposals that
  // match the description of that MatchProfile.
  // FetchMatches immediately returns an error if it encounters any execution failures.
  // FetchMatches triggers a MatchFunction with the specified MatchProfile and
  // returns a set of matches generated by the Match Making Function, and
  // accepted by the evaluator.
  // Tickets in matches returned by FetchMatches are moved from active to
  // pending, and will not be returned by query.
  rpc FetchMatches(FetchMatchesRequest) returns (stream FetchMatchesResponse) {
    option (google.api.http) = {
      post: "/v1/backendservice/matches:fetch"
@@ -118,9 +144,8 @@ service BackendService {
    };
  }
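Per the new comment above, FetchMatches streams back matches accepted by the evaluator, and the tickets they contain move from active to pending. A hedged Go sketch of a director-side loop that drains the stream; it assumes the same pkg/pb bindings, and the FunctionConfig shape (host, port, type) is taken from the API rather than this diff, so the MMF address, port, and helper name are placeholders:

```go
package example

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

// fetchMatches drains the FetchMatches stream; tickets inside returned
// matches move from active to pending and stop appearing in query results.
func fetchMatches(ctx context.Context, conn *grpc.ClientConn, profile *pb.MatchProfile) error {
	be := pb.NewBackendServiceClient(conn)

	stream, err := be.FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config: &pb.FunctionConfig{
			Host: "om-function.open-match.svc.cluster.local", // placeholder MMF address
			Port: 50502,                                      // placeholder MMF port
			Type: pb.FunctionConfig_GRPC,
		},
		Profile: profile,
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("match %s contains %d tickets", resp.Match.MatchId, len(resp.Match.Tickets))
	}
}
```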

  // ReleaseTickets removes the submitted tickets from the list that prevents tickets
  // that are awaiting assignment from appearing in MMF queries, effectively putting them back into
  // the matchmaking pool
  // ReleaseTickets moves tickets from the pending state, to the active state.
  // This enables them to be returned by query, and find different matches.
  //
  // BETA FEATURE WARNING: This call and the associated Request and Response
  // messages are not finalized and still subject to possible change or removal.
@@ -130,4 +155,17 @@ service BackendService {
      body: "*"
    };
  }

  // ReleaseAllTickets moves all tickets from the pending state, to the active
  // state. This enables them to be returned by query, and find different
  // matches.
  //
  // BETA FEATURE WARNING: This call and the associated Request and Response
  // messages are not finalized and still subject to possible change or removal.
  rpc ReleaseAllTickets(ReleaseAllTicketsRequest) returns (ReleaseAllTicketsResponse) {
    option (google.api.http) = {
      post: "/v1/backendservice/tickets:releaseall"
      body: "*"
    };
  }
}
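ReleaseTickets and the new ReleaseAllTickets both move tickets from pending back to active so queries can see them again. A small hedged sketch of both beta calls, under the same assumed pkg/pb bindings; helper names and ticket ids are placeholders:

```go
package example

import (
	"context"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

// releaseTickets puts specific pending tickets back into the active pool.
func releaseTickets(ctx context.Context, conn *grpc.ClientConn, ids []string) error {
	be := pb.NewBackendServiceClient(conn)
	_, err := be.ReleaseTickets(ctx, &pb.ReleaseTicketsRequest{TicketIds: ids})
	return err
}

// releaseAllTickets does the same for every pending ticket at once,
// e.g. after a director restart.
func releaseAllTickets(ctx context.Context, conn *grpc.ClientConn) error {
	be := pb.NewBackendServiceClient(conn)
	_, err := be.ReleaseAllTickets(ctx, &pb.ReleaseAllTicketsRequest{})
	return err
}
```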
@ -26,7 +26,7 @@
|
||||
"paths": {
|
||||
"/v1/backendservice/matches:fetch": {
|
||||
"post": {
|
||||
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile \r\nreturns a set of match proposals. FetchMatches method streams the results back to the caller.\r\nFetchMatches immediately returns an error if it encounters any execution failures.\r\n - If the synchronizer is enabled, FetchMatch will then call the synchronizer to deduplicate proposals with overlapped tickets.",
|
||||
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and\nreturns a set of matches generated by the Match Making Function, and\naccepted by the evaluator.\nTickets in matches returned by FetchMatches are moved from active to\npending, and will not be returned by query.",
|
||||
"operationId": "FetchMatches",
|
||||
"responses": {
|
||||
"200": {
|
||||
@ -38,6 +38,7 @@
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
}
|
||||
@ -71,6 +72,7 @@
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
}
|
||||
@ -90,9 +92,10 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/backend/tickets:release": {
|
||||
"/v1/backendservice/tickets:release": {
|
||||
"post": {
|
||||
"summary": "ReleaseTickets removes the submitted tickets from the list that prevents tickets \r\nthat are awaiting assignment from appearing in MMF queries, effectively putting them back into\r\nthe matchmaking pool",
|
||||
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.",
|
||||
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "ReleaseTickets",
|
||||
"responses": {
|
||||
"200": {
|
||||
@ -104,6 +107,7 @@
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
}
|
||||
@ -119,30 +123,78 @@
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Backend"
|
||||
"BackendService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/backendservice/tickets:releaseall": {
|
||||
"post": {
|
||||
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.",
|
||||
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
|
||||
"operationId": "ReleaseAllTickets",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchReleaseAllTicketsResponse"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Returned when the resource does not exist.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"format": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/openmatchReleaseAllTicketsRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"BackendService"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"AssignmentFailureCause": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"UNKNOWN",
|
||||
"TICKET_NOT_FOUND"
|
||||
],
|
||||
"default": "UNKNOWN"
|
||||
},
|
||||
"openmatchAssignTicketsRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ticket_ids": {
|
||||
"assignments": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
"$ref": "#/definitions/openmatchAssignmentGroup"
|
||||
},
|
||||
"description": "TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment."
|
||||
},
|
||||
"assignment": {
|
||||
"$ref": "#/definitions/openmatchAssignment",
|
||||
"description": "An Assignment specifies game connection related information to be associated with the TicketIds."
|
||||
"description": "Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied."
|
||||
}
|
||||
}
|
||||
},
|
||||
"openmatchAssignTicketsResponse": {
|
||||
"type": "object"
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"failures": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/openmatchAssignmentFailure"
|
||||
},
|
||||
"description": "Failures is a list of all the Tickets that failed assignment along with the cause of failure."
|
||||
}
|
||||
}
|
||||
},
|
||||
"openmatchAssignment": {
|
||||
"type": "object",
|
||||
@ -156,10 +208,39 @@
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\r\nmaking function, evaluator, and components making calls to Open Match.\r\nOptional, depending on the requirements of the connected systems."
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
}
|
||||
},
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\r\nmatch does not require or inspect any fields on assignment."
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
|
||||
},
|
||||
"openmatchAssignmentFailure": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ticket_id": {
|
||||
"type": "string"
|
||||
},
|
||||
"cause": {
|
||||
"$ref": "#/definitions/AssignmentFailureCause"
|
||||
}
|
||||
},
|
||||
"description": "AssignmentFailure contains the id of the Ticket that failed the Assignment and the failure status."
|
||||
},
|
||||
"openmatchAssignmentGroup": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ticket_ids": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment."
|
||||
},
|
||||
"assignment": {
|
||||
"$ref": "#/definitions/openmatchAssignment",
|
||||
"description": "An Assignment specifies game connection related information to be associated with the TicketIds."
|
||||
}
|
||||
},
|
||||
"description": "AssignmentGroup contains an Assignment and the Tickets to which it should be applied."
|
||||
},
|
||||
"openmatchDoubleRangeFilter": {
|
||||
"type": "object",
|
||||
@ -179,18 +260,18 @@
|
||||
"description": "Minimum value."
|
||||
}
|
||||
},
|
||||
"title": "Filters numerical values to only those within a range.\r\n double_arg: \"foo\"\r\n max: 10\r\n min: 5\r\nmatches:\r\n {\"foo\": 5}\r\n {\"foo\": 7.5}\r\n {\"foo\": 10}\r\ndoes not match:\r\n {\"foo\": 4}\r\n {\"foo\": 10.01}\r\n {\"foo\": \"7.5\"}\r\n {}"
|
||||
"title": "Filters numerical values to only those within a range.\n double_arg: \"foo\"\n max: 10\n min: 5\nmatches:\n {\"foo\": 5}\n {\"foo\": 7.5}\n {\"foo\": 10}\ndoes not match:\n {\"foo\": 4}\n {\"foo\": 10.01}\n {\"foo\": \"7.5\"}\n {}"
|
||||
},
|
||||
"openmatchFetchMatchesRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"config": {
|
||||
"$ref": "#/definitions/openmatchFunctionConfig",
|
||||
"title": "FunctionConfig specifies a MMF address and client type for Backend to establish connections with the MMF"
|
||||
"description": "A configuration for the MatchFunction server of this FetchMatches call."
|
||||
},
|
||||
"profile": {
|
||||
"$ref": "#/definitions/openmatchMatchProfile",
|
||||
"description": "MatchProfiles that will be sent to thhe MMF specified in the FunctionConfig."
|
||||
"description": "A MatchProfile that will be sent to the MatchFunction server of this FetchMatches call."
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -199,7 +280,7 @@
|
||||
"properties": {
|
||||
"match": {
|
||||
"$ref": "#/definitions/openmatchMatch",
|
||||
"description": "A Match generated by the user-defined MMF with the specified MatchProfiles.\r\nA valid Match response will contain at least one ticket."
|
||||
"description": "A Match generated by the user-defined MMF with the specified MatchProfiles.\nA valid Match response will contain at least one ticket."
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -254,14 +335,10 @@
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\r\nmaking function, evaluator, and components making calls to Open Match.\r\nOptional, depending on the requirements of the connected systems."
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
}
|
||||
},
|
||||
<<<<<<< HEAD
|
||||
"description": "A Match is used to represent a completed match object. It can be generated by\r\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\r\nresponse to the FetchMatches call.\r\nWhen a match is returned by the FetchMatches call, it should contain at least \r\none ticket to be considered as valid."
|
||||
=======
|
||||
"description": "A Match is used to represent a completed match object. It can be generated by\na MatchFunction as a proposal or can be returned by OpenMatch as a result in\nresponse to the FetchMatches call.\nWhen a match is returned by the FetchMatches call, it should contain at least\none ticket to be considered as valid."
|
||||
>>>>>>> cf8d49052c148a3179904700e12cc8115a47af4e
|
||||
},
|
||||
"openmatchMatchProfile": {
|
||||
"type": "object",
|
||||
@ -275,28 +352,17 @@
|
||||
"items": {
|
||||
"$ref": "#/definitions/openmatchPool"
|
||||
},
|
||||
<<<<<<< HEAD
|
||||
"description": "Set of pools to be queried when generating a match for this MatchProfile.\r\nThe pool names can be used in empty Rosters to specify composition of a\r\nmatch."
|
||||
},
|
||||
"rosters": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/openmatchRoster"
|
||||
},
|
||||
"description": "Set of Rosters for this match request. Could be empty Rosters used to\r\nindicate the composition of the generated Match or they could be partially\r\npre-populated Ticket list to be used in scenarios such as backfill / join\r\nin progress."
|
||||
=======
|
||||
"description": "Set of pools to be queried when generating a match for this MatchProfile."
|
||||
>>>>>>> cf8d49052c148a3179904700e12cc8115a47af4e
|
||||
},
|
||||
"extensions": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\r\nmaking function, evaluator, and components making calls to Open Match.\r\nOptional, depending on the requirements of the connected systems."
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
}
|
||||
},
|
||||
"description": "A MatchProfile is Open Match's representation of a Match specification. It is\r\nused to indicate the criteria for selecting players for a match. A\r\nMatchProfile is the input to the API to get matches and is passed to the\r\nMatchFunction. It contains all the information required by the MatchFunction\r\nto generate match proposals."
|
||||
"description": "A MatchProfile is Open Match's representation of a Match specification. It is\nused to indicate the criteria for selecting players for a match. A\nMatchProfile is the input to the API to get matches and is passed to the\nMatchFunction. It contains all the information required by the MatchFunction\nto generate match proposals."
|
||||
},
|
||||
"openmatchPool": {
|
||||
"type": "object",
|
||||
@ -310,7 +376,7 @@
|
||||
"items": {
|
||||
"$ref": "#/definitions/openmatchDoubleRangeFilter"
|
||||
},
|
||||
"description": "Set of Filters indicating the filtering criteria. Selected players must\r\nmatch every Filter."
|
||||
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
|
||||
},
|
||||
"string_equals_filters": {
|
||||
"type": "array",
|
||||
@ -323,10 +389,26 @@
|
||||
"items": {
|
||||
"$ref": "#/definitions/openmatchTagPresentFilter"
|
||||
}
|
||||
},
|
||||
"created_before": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "If specified, only Tickets created before the specified time are selected."
|
||||
},
|
||||
"created_after": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "If specified, only Tickets created after the specified time are selected."
|
||||
}
|
||||
}
|
||||
},
|
||||
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
|
||||
},
|
||||
"openmatchReleaseAllTicketsRequest": {
|
||||
"type": "object"
|
||||
},
|
||||
"openmatchReleaseAllTicketsResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
<<<<<<< HEAD
|
||||
"openmatchReleaseTicketsRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -335,32 +417,13 @@
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "TicketIds is a list of string representing Open Match generated Ids to be re-enabled for MMF querying\r\nbecause they are no longer awaiting assignment from a previous match result"
|
||||
"title": "TicketIds is a list of string representing Open Match generated Ids to be re-enabled for MMF querying\nbecause they are no longer awaiting assignment from a previous match result"
|
||||
}
|
||||
}
|
||||
},
|
||||
"openmatchReleaseTicketsResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"openmatchRoster": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "A developer-chosen human-readable name for this Roster."
|
||||
},
|
||||
"ticket_ids": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "Tickets belonging to this Roster."
|
||||
}
|
||||
},
|
||||
"description": "A Roster is a named collection of Ticket IDs. It exists so that a Tickets\r\nassociated with a Match can be labelled to belong to a team, sub-team etc. It\r\ncan also be used to represent the current state of a Match in scenarios such\r\nas backfill, join-in-progress etc."
|
||||
},
|
||||
=======
|
||||
>>>>>>> cf8d49052c148a3179904700e12cc8115a47af4e
|
||||
"openmatchSearchFields": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -387,7 +450,7 @@
|
||||
"description": "Filterable on presence or absence of given value."
|
||||
}
|
||||
},
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\r\nwhen specifying filters."
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
|
||||
},
|
||||
"openmatchStringEqualsFilter": {
|
||||
"type": "object",
|
||||
@ -400,7 +463,7 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"title": "Filters strings exactly equaling a value.\r\n string_arg: \"foo\"\r\n value: \"bar\"\r\nmatches:\r\n {\"foo\": \"bar\"}\r\ndoes not match:\r\n {\"foo\": \"baz\"}\r\n {\"bar\": \"foo\"}\r\n {}"
|
||||
"title": "Filters strings exactly equaling a value.\n string_arg: \"foo\"\n value: \"bar\"\nmatches:\n {\"foo\": \"bar\"}\ndoes not match:\n {\"foo\": \"baz\"}\n {\"bar\": \"foo\"}\n {}"
|
||||
},
|
||||
"openmatchTagPresentFilter": {
|
||||
"type": "object",
|
||||
@ -409,7 +472,7 @@
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"title": "Filters to the tag being present on the search_fields.\r\n tag: \"foo\"\r\nmatches:\r\n [\"foo\"]\r\n [\"bar\",\"foo\"]\r\ndoes not match:\r\n [\"bar\"]\r\n []"
|
||||
"title": "Filters to the tag being present on the search_fields.\n tag: \"foo\"\nmatches:\n [\"foo\"]\n [\"bar\",\"foo\"]\ndoes not match:\n [\"bar\"]\n []"
|
||||
},
|
||||
"openmatchTicket": {
|
||||
"type": "object",
|
||||
@ -420,25 +483,26 @@
|
||||
},
|
||||
"assignment": {
|
||||
"$ref": "#/definitions/openmatchAssignment",
|
||||
<<<<<<< HEAD
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket. \r\nOpen Match does not require or inspect any fields on Assignment."
|
||||
=======
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
|
||||
>>>>>>> cf8d49052c148a3179904700e12cc8115a47af4e
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
|
||||
},
|
||||
"search_fields": {
|
||||
"$ref": "#/definitions/openmatchSearchFields",
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\r\nwhen specifying filters."
|
||||
"description": "Search fields are the fields which Open Match is aware of, and can be used\nwhen specifying filters."
|
||||
},
|
||||
"extensions": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\r\nmaking function, evaluator, and components making calls to Open Match.\r\nOptional, depending on the requirements of the connected systems."
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"create_time": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
|
||||
}
|
||||
},
|
||||
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\r\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\r\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\r\nof SearchFields. Open Match stores the Ticket in state storage and enables an\r\nAssignment to be associated with this Ticket."
|
||||
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
|
||||
},
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
@ -455,31 +519,6 @@
|
||||
},
|
||||
"description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := ptypes.MarshalAny(foo)\n ...\n foo := \u0026pb.Foo{}\n if err := ptypes.UnmarshalAny(any, foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }"
|
||||
},
|
||||
<<<<<<< HEAD
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"description": "The status code, which should be an enum value of\r\n[google.rpc.Code][google.rpc.Code]."
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"description": "A developer-facing error message, which should be in English. Any\r\nuser-facing error message should be localized and sent in the\r\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized\r\nby the client."
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "A list of messages that carry the error details. There is a common set of\r\nmessage types for APIs to use."
|
||||
}
|
||||
},
|
||||
"description": "The `Status` type defines a logical error model that is suitable for\r\ndifferent programming environments, including REST APIs and RPC APIs. It is\r\nused by [gRPC](https://github.com/grpc). The error model is designed to be:\r\n\r\n- Simple to use and understand for most users\r\n- Flexible enough to meet unexpected needs\r\n\r\n# Overview\r\n\r\nThe `Status` message contains three pieces of data: error code, error\r\nmessage, and error details. The error code should be an enum value of\r\n[google.rpc.Code][google.rpc.Code], but it may accept additional error codes\r\nif needed. The error message should be a developer-facing English message\r\nthat helps developers *understand* and *resolve* the error. If a localized\r\nuser-facing error message is needed, put the localized message in the error\r\ndetails or localize it in the client. The optional error details may contain\r\narbitrary information about the error. There is a predefined set of error\r\ndetail types in the package `google.rpc` that can be used for common error\r\nconditions.\r\n\r\n# Language mapping\r\n\r\nThe `Status` message is the logical representation of the error model, but it\r\nis not necessarily the actual wire format. When the `Status` message is\r\nexposed in different client libraries and different wire protocols, it can be\r\nmapped differently. For example, it will likely be mapped to some exceptions\r\nin Java, but more likely mapped to some error codes in C.\r\n\r\n# Other uses\r\n\r\nThe error model and the `Status` message can be used in a variety of\r\nenvironments, either with or without APIs, to provide a\r\nconsistent developer experience across different environments.\r\n\r\nExample uses of this error model include:\r\n\r\n- Partial errors. If a service needs to return partial errors to the client,\r\n it may embed the `Status` in the normal response to indicate the partial\r\n errors.\r\n\r\n- Workflow errors. A typical workflow has multiple steps. Each step may\r\n have a `Status` message for error reporting.\r\n\r\n- Batch operations. If a client uses batch request and batch response, the\r\n `Status` message should be used directly inside batch response, one for\r\n each error sub-response.\r\n\r\n- Asynchronous operations. If an API call embeds asynchronous operation\r\n results in its response, the status of those operations should be\r\n represented directly using the `Status` message.\r\n\r\n- Logging. If some API errors are stored in logs, the message `Status` could\r\n be used directly after any stripping needed for security/privacy reasons."
|
||||
},
|
||||
=======
|
||||
>>>>>>> cf8d49052c148a3179904700e12cc8115a47af4e
|
||||
"runtimeStreamError": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
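The openmatchPool definition earlier in this diff gains created_before and created_after time bounds alongside the existing filter lists. A hedged Go sketch of building such a Pool, assuming the generated pkg/pb bindings and the github.com/golang/protobuf/ptypes timestamp helper in use at the time; the pool name and filter values are purely illustrative:

```go
package example

import (
	"time"

	"github.com/golang/protobuf/ptypes"
	"open-match.dev/open-match/pkg/pb"
)

// freshBeginnersPool combines the existing filters with the new
// created_after bound so only recently created tickets are considered.
func freshBeginnersPool() (*pb.Pool, error) {
	createdAfter, err := ptypes.TimestampProto(time.Now().Add(-10 * time.Minute))
	if err != nil {
		return nil, err
	}
	return &pb.Pool{
		Name: "beginner-na", // placeholder pool name
		DoubleRangeFilters: []*pb.DoubleRangeFilter{
			{DoubleArg: "mmr", Min: 0, Max: 1000},
		},
		TagPresentFilters: []*pb.TagPresentFilter{
			{Tag: "na-east"},
		},
		CreatedAfter: createdAfter,
	}, nil
}
```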
@ -76,7 +76,7 @@
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
}
|
||||
},
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
|
||||
},
|
||||
"openmatchEvaluateRequest": {
|
||||
"type": "object",
|
||||
@ -165,7 +165,7 @@
|
||||
},
|
||||
"assignment": {
|
||||
"$ref": "#/definitions/openmatchAssignment",
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
|
||||
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
|
||||
},
|
||||
"search_fields": {
|
||||
"$ref": "#/definitions/openmatchSearchFields",
|
||||
@ -177,9 +177,14 @@
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
},
|
||||
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
|
||||
},
|
||||
"create_time": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
|
||||
}
|
||||
},
|
||||
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
|
||||
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
|
||||
},
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
|
@@ -20,6 +20,7 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "google/protobuf/empty.proto";

option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
  info: {
@@ -60,29 +61,22 @@ message CreateTicketRequest {
  Ticket ticket = 1;
}

message CreateTicketResponse {
  // A Ticket object with TicketId generated.
  Ticket ticket = 1;
}

message DeleteTicketRequest {
  // A TicketId of a generated Ticket to be deleted.
  string ticket_id = 1;
}

message DeleteTicketResponse {}

message GetTicketRequest {
  // A TicketId of a generated Ticket.
  string ticket_id = 1;
}

message GetAssignmentsRequest {
message WatchAssignmentsRequest {
  // A TicketId of a generated Ticket to get updates on.
  string ticket_id = 1;
}

message GetAssignmentsResponse {
message WatchAssignmentsResponse {
  // An updated Assignment of the requested Ticket.
  Assignment assignment = 1;
}
@@ -93,7 +87,7 @@ service FrontendService {
  // A ticket is considered as ready for matchmaking once it is created.
  // - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
  // - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.
  rpc CreateTicket(CreateTicketRequest) returns (CreateTicketResponse) {
  rpc CreateTicket(CreateTicketRequest) returns (Ticket) {
    option (google.api.http) = {
      post: "/v1/frontendservice/tickets"
      body: "*"
@@ -101,10 +95,8 @@ service FrontendService {
  }

  // DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
  // The client must delete the Ticket when finished matchmaking with it.
  // - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
  // Users may still be able to assign/get a ticket after calling DeleteTicket on it.
  rpc DeleteTicket(DeleteTicketRequest) returns (DeleteTicketResponse) {
  // The client should delete the Ticket when finished matchmaking with it.
  rpc DeleteTicket(DeleteTicketRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v1/frontendservice/tickets/{ticket_id}"
    };
@@ -117,10 +109,10 @@ service FrontendService {
    };
  }

  // GetAssignments stream back Assignment of the specified TicketId if it is updated.
  // WatchAssignments stream back Assignment of the specified TicketId if it is updated.
  // - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
  rpc GetAssignments(GetAssignmentsRequest)
      returns (stream GetAssignmentsResponse) {
  rpc WatchAssignments(WatchAssignmentsRequest)
      returns (stream WatchAssignmentsResponse) {
    option (google.api.http) = {
      get: "/v1/frontendservice/tickets/{ticket_id}/assignments"
    };
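The frontend.proto hunk above changes the client-facing surface in three ways: CreateTicket now returns the Ticket directly, DeleteTicket returns google.protobuf.Empty, and GetAssignments is renamed to the streaming WatchAssignments. The sketch below shows how a Go client might exercise the new surface; it is not part of the diff. The endpoint address, port, and insecure dial options are assumptions for illustration only — production clients would use the connection settings from their own deployment.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Assumed in-cluster address of the frontend service; adjust for your deployment.
	conn, err := grpc.Dial("om-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fe := pb.NewFrontendServiceClient(conn)
	ctx := context.Background()

	// CreateTicket now returns the Ticket itself, no CreateTicketResponse wrapper.
	ticket, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	if err != nil {
		log.Fatal(err)
	}

	// WatchAssignments replaces GetAssignments and streams assignment updates.
	stream, err := fe.WatchAssignments(ctx, &pb.WatchAssignmentsRequest{TicketId: ticket.Id})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		if c := resp.GetAssignment().GetConnection(); c != "" {
			log.Println("assigned to", c)
			break
		}
	}

	// DeleteTicket now returns google.protobuf.Empty.
	if _, err := fe.DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticket.Id}); err != nil {
		log.Fatal(err)
	}
}
```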
@@ -32,7 +32,7 @@
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchCreateTicketResponse"
"$ref": "#/definitions/openmatchTicket"
}
},
"404": {
@@ -91,13 +91,13 @@
]
},
"delete": {
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client must delete the Ticket when finished matchmaking with it. \n - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.\nUsers may still be able to assign/get a ticket after calling DeleteTicket on it.",
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client should delete the Ticket when finished matchmaking with it.",
"operationId": "DeleteTicket",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchDeleteTicketResponse"
"properties": {}
}
},
"404": {
@@ -124,13 +124,13 @@
},
"/v1/frontendservice/tickets/{ticket_id}/assignments": {
"get": {
"summary": "GetAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "GetAssignments",
"summary": "WatchAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "WatchAssignments",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchGetAssignmentsResponse"
"$ref": "#/x-stream-definitions/openmatchWatchAssignmentsResponse"
}
},
"404": {
@@ -172,7 +172,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchCreateTicketRequest": {
"type": "object",
@@ -183,27 +183,6 @@
}
}
},
"openmatchCreateTicketResponse": {
"type": "object",
"properties": {
"ticket": {
"$ref": "#/definitions/openmatchTicket",
"description": "A Ticket object with TicketId generated."
}
}
},
"openmatchDeleteTicketResponse": {
"type": "object"
},
"openmatchGetAssignmentsResponse": {
"type": "object",
"properties": {
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Ticket."
}
}
},
"openmatchSearchFields": {
"type": "object",
"properties": {
@@ -241,7 +220,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@@ -253,9 +232,23 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Ticket."
}
}
},
"protobufAny": {
"type": "object",
@@ -299,17 +292,17 @@
}
},
"x-stream-definitions": {
"openmatchGetAssignmentsResponse": {
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchGetAssignmentsResponse"
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchGetAssignmentsResponse"
"title": "Stream result of openmatchWatchAssignmentsResponse"
}
},
"externalDocs": {
@@ -75,7 +75,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchDoubleRangeFilter": {
"type": "object",
@@ -165,7 +165,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@@ -178,8 +178,19 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchRunRequest": {
"type": "object",
@@ -258,7 +269,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@@ -270,9 +281,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"protobufAny": {
"type": "object",
@@ -19,17 +19,20 @@ option csharp_namespace = "OpenMatch";

import "google/rpc/status.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

// A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an
// individual 'Player' or a 'Group' of players. Open Match will not interpret
// what the Ticket represents but just treat it as a matchmaking unit with a set
// of SearchFields. Open Match stores the Ticket in state storage and enables an
// Assignment to be associated with this Ticket.
// A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent
// an individual 'Player', a 'Group' of players, or any other concepts unique to
// your use case. Open Match will not interpret what the Ticket represents but
// just treat it as a matchmaking unit with a set of SearchFields. Open Match
// stores the Ticket in state storage and enables an Assignment to be set on the
// Ticket.
message Ticket {
  // Id represents an auto-generated Id issued by Open Match.
  string id = 1;

  // An Assignment represents a game server assignment associated with a Ticket.
  // An Assignment represents a game server assignment associated with a Ticket,
  // or whatever finalized matched state means for your use case.
  // Open Match does not require or inspect any fields on Assignment.
  Assignment assignment = 3;

@@ -42,6 +45,10 @@ message Ticket {
  // Optional, depending on the requirements of the connected systems.
  map<string, google.protobuf.Any> extensions = 5;

  // Create time is the time the Ticket was created. It is populated by Open
  // Match at the time of Ticket creation.
  google.protobuf.Timestamp create_time = 6;

  // Deprecated fields.
  reserved 2;
}
@@ -59,8 +66,8 @@ message SearchFields {
  repeated string tags = 3;
}

// An Assignment represents a game server assignment associated with a Ticket. Open
// match does not require or inspect any fields on assignment.
// An Assignment represents a game server assignment associated with a Ticket.
// Open Match does not require or inspect any fields on assignment.
message Assignment {
  // Connection information for this Assignment.
  string connection = 1;
@@ -126,11 +133,13 @@ message TagPresentFilter {
  string tag = 1;
}

// Pool specfies a set of criteria that are used to select a subset of Tickets
// that meet all the criteria.
message Pool {
  // A developer-chosen human-readable name for this Pool.
  string name = 1;

  // Set of Filters indicating the filtering criteria. Selected players must
  // Set of Filters indicating the filtering criteria. Selected tickets must
  // match every Filter.
  repeated DoubleRangeFilter double_range_filters = 2;

@@ -138,6 +147,12 @@ message Pool {

  repeated TagPresentFilter tag_present_filters = 5;

  // If specified, only Tickets created before the specified time are selected.
  google.protobuf.Timestamp created_before = 6;

  // If specified, only Tickets created after the specified time are selected.
  google.protobuf.Timestamp created_after = 7;

  // Deprecated fields.
  reserved 3;
}
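The messages.proto hunk above adds `created_before` and `created_after` time bounds to Pool alongside the existing filter types. The sketch below shows how a Go caller might build such a Pool; it is not part of the diff. The DoubleRangeFilter field names (DoubleArg, Min, Max) and the use of the ptypes helper for timestamps are assumptions based on the generated bindings in pkg/pb — check your version for the exact names.

```go
package main

import (
	"time"

	"github.com/golang/protobuf/ptypes"
	"open-match.dev/open-match/pkg/pb"
)

// buildPool is a minimal sketch of a Pool that combines numeric and tag
// filters with the new created_after time bound.
func buildPool() (*pb.Pool, error) {
	// Only consider Tickets created in the last 10 minutes.
	createdAfter, err := ptypes.TimestampProto(time.Now().Add(-10 * time.Minute))
	if err != nil {
		return nil, err
	}

	return &pb.Pool{
		Name: "example",
		DoubleRangeFilters: []*pb.DoubleRangeFilter{
			{DoubleArg: "mmr", Min: 1000, Max: 2000},
		},
		TagPresentFilters: []*pb.TagPresentFilter{
			{Tag: "mode.battleroyal"},
		},
		CreatedAfter: createdAfter,
	}, nil
}
```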
@@ -56,25 +56,46 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
};

message QueryTicketsRequest {
  // A Pool is consists of a set of Filters.
  // The Pool representing the set of Filters to be queried.
  Pool pool = 1;
}

message QueryTicketsResponse {
  // Tickets that satisfy all the filtering criteria.
  // Tickets that meet all the filtering criteria requested by the pool.
  repeated Ticket tickets = 1;
}

message QueryTicketIdsRequest {
  // The Pool representing the set of Filters to be queried.
  Pool pool = 1;
}

message QueryTicketIdsResponse {
  // TicketIDs that meet all the filtering criteria requested by the pool.
  repeated string ids = 1;
}

// The QueryService service implements helper APIs for Match Function to query Tickets from state storage.
service QueryService {
  // QueryTickets gets a list of Tickets that match all Filters of the input Pool.
  // - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
  // QueryTickets pages the Tickets by `storage.pool.size` and stream back response.
  // - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000
  // QueryTickets pages the Tickets by `queryPageSize` and stream back responses.
  // - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
  rpc QueryTickets(QueryTicketsRequest) returns (stream QueryTicketsResponse) {
    option (google.api.http) = {
      post: "/v1/queryservice/tickets:query"
      body: "*"
    };
  }

  // QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
  // - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
  // QueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.
  // - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
  rpc QueryTicketIds(QueryTicketIdsRequest) returns (stream QueryTicketIdsResponse) {
    option (google.api.http) = {
      post: "/v1/queryservice/ticketids:query"
      body: "*"
    };
  }
}
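Because both RPCs above stream pages of results, a caller has to drain the stream until EOF to see every ticket the Pool matches. The following Go sketch (not part of the diff) shows one way a match function might consume QueryTickets; the helpers under pkg/matchfunction wrap a similar pattern, but the exact helper signatures are not shown here and should be taken from that package.

```go
package main

import (
	"context"
	"io"

	"open-match.dev/open-match/pkg/pb"
)

// queryAll drains the QueryTickets stream for a single Pool. The server pages
// results by queryPageSize, so each response carries a slice of Tickets.
func queryAll(ctx context.Context, qc pb.QueryServiceClient, pool *pb.Pool) ([]*pb.Ticket, error) {
	stream, err := qc.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: pool})
	if err != nil {
		return nil, err
	}

	var tickets []*pb.Ticket
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return tickets, nil
		}
		if err != nil {
			return nil, err
		}
		tickets = append(tickets, resp.Tickets...)
	}
}
```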
@@ -24,9 +24,43 @@
"application/json"
],
"paths": {
"/v1/queryservice/ticketids:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"operationId": "QueryTicketIds",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchQueryTicketIdsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchQueryTicketIdsRequest"
}
}
],
"tags": [
"QueryService"
]
}
},
"/v1/queryservice/tickets:query": {
"post": {
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `storage.pool.size` and stream back response.\n - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000",
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and stream back responses.\n - queryPageSize is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"operationId": "QueryTickets",
"responses": {
"200": {
@@ -75,7 +109,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchDoubleRangeFilter": {
"type": "object",
@@ -109,7 +143,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@@ -122,6 +156,38 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchQueryTicketIdsRequest": {
"type": "object",
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
}
}
},
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"ids": {
"type": "array",
"items": {
"type": "string"
},
"description": "TicketIDs that meet all the filtering criteria requested by the pool."
}
}
},
@@ -130,7 +196,7 @@
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "A Pool is consists of a set of Filters."
"description": "The Pool representing the set of Filters to be queried."
}
}
},
@@ -142,7 +208,7 @@
"items": {
"$ref": "#/definitions/openmatchTicket"
},
"description": "Tickets is a list of Ticket representing one or more Tickets which meet all Filter criterias."
"description": "Tickets that meet all the filtering criteria requested by the pool."
}
}
},
@@ -205,7 +271,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@@ -217,9 +283,14 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"protobufAny": {
"type": "object",
@@ -263,6 +334,18 @@
}
},
"x-stream-definitions": {
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
},
"openmatchQueryTicketsResponse": {
"type": "object",
"properties": {
@@ -48,8 +48,8 @@

steps:
- id: 'Docker Image: open-match-build'
  name: gcr.io/kaniko-project/executor
  args: ['--destination=gcr.io/$PROJECT_ID/open-match-build', '--cache=true', '--cache-ttl=48h', '--dockerfile=Dockerfile.ci']
  name: gcr.io/cloud-builders/docker
  args: ['build', '-t', 'gcr.io/$PROJECT_ID/open-match-build', '-f', 'Dockerfile.ci', '.']
  waitFor: ['-']

- id: 'Build: Clean'
@@ -164,7 +164,7 @@ artifacts:
  - install/yaml/06-open-match-override-configmap.yaml

substitutions:
  _OM_VERSION: "0.0.0-dev"
  _OM_VERSION: "1.0.0-rc.1"
  _GCB_POST_SUBMIT: "0"
  _GCB_LATEST_VERSION: "undefined"
logsBucket: 'gs://open-match-build-logs/'
@@ -16,11 +16,10 @@
package main

import (
	"open-match.dev/open-match/internal/app"
	"open-match.dev/open-match/internal/app/backend"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	app.RunApplication("backend", config.Read, backend.BindService)
	appmain.RunApplication("backend", backend.BindService)
}

@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package e2e
package main

import (
	"open-match.dev/open-match/internal/testing/e2e"
	"testing"
	"open-match.dev/open-match/internal/app/evaluator/defaulteval"
	"open-match.dev/open-match/internal/appmain"
)

func TestMain(m *testing.M) {
	e2e.RunMain(m)
func main() {
	appmain.RunApplication("evaluator", defaulteval.BindService)
}

@@ -16,11 +16,10 @@
package main

import (
	"open-match.dev/open-match/internal/app"
	"open-match.dev/open-match/internal/app/frontend"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	app.RunApplication("frontend", config.Read, frontend.BindService)
	appmain.RunApplication("frontend", frontend.BindService)
}

@@ -16,11 +16,10 @@
package main

import (
	"open-match.dev/open-match/internal/app"
	"open-match.dev/open-match/internal/app/minimatch"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	app.RunApplication("minimatch", config.Read, minimatch.BindService)
	appmain.RunApplication("minimatch", minimatch.BindService)
}

@@ -16,11 +16,10 @@
package main

import (
	"open-match.dev/open-match/internal/app"
	"open-match.dev/open-match/internal/app/query"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	app.RunApplication("query", config.Read, query.BindService)
	appmain.RunApplication("query", query.BindService)
}

@@ -16,10 +16,9 @@ package main

import (
	"open-match.dev/open-match/examples/scale/backend"
	"open-match.dev/open-match/internal/app"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	app.RunApplication("scale", config.Read, backend.BindService)
	appmain.RunApplication("scale", backend.BindService)
}

@@ -16,10 +16,9 @@ package main

import (
	"open-match.dev/open-match/examples/scale/frontend"
	"open-match.dev/open-match/internal/app"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	app.RunApplication("scale", config.Read, frontend.BindService)
	appmain.RunApplication("scale", frontend.BindService)
}

@@ -1,10 +1,10 @@
{
"urls": [
{"name": "Frontend", "url": "https://open-match.dev/api/v0.0.0-dev/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v0.0.0-dev/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v0.0.0-dev/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v0.0.0-dev/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v0.0.0-dev/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v0.0.0-dev/evaluator.swagger.json"}
{"name": "Frontend", "url": "https://open-match.dev/api/v1.0.0-rc.1/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v1.0.0-rc.1/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v1.0.0-rc.1/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v1.0.0-rc.1/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v1.0.0-rc.1/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v1.0.0-rc.1/evaluator.swagger.json"}
]
}

@@ -16,11 +16,10 @@
package main

import (
	"open-match.dev/open-match/internal/app"
	"open-match.dev/open-match/internal/app/synchronizer"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	app.RunApplication("synchronizer", config.Read, synchronizer.BindService)
	appmain.RunApplication("synchronizer", synchronizer.BindService)
}
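Every cmd/*/main.go above migrates from app.RunApplication(name, config.Read, BindService) to appmain.RunApplication(name, BindService), with config loading moved into appmain. The sketch below (not part of the diff) shows what an in-repo component using the new entry point might look like; the component name "mycomponent" and the run function are hypothetical, and the BindService signature is taken from the scale component diffs further down.

```go
package main

import (
	"open-match.dev/open-match/internal/appmain"
	"open-match.dev/open-match/internal/config"
)

func main() {
	// appmain now owns config loading, so config.Read is no longer passed in.
	appmain.RunApplication("mycomponent", bindService)
}

// bindService uses the new binding signature; Params exposes the loaded
// configuration via p.Config().
func bindService(p *appmain.Params, b *appmain.Bindings) error {
	go run(p.Config())
	return nil
}

// run is a placeholder for the component's actual work loop.
func run(cfg config.View) {}
```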
@@ -9,14 +9,12 @@ To build Open Match you'll need the following applications installed.

 * [Git](https://git-scm.com/downloads)
 * [Go](https://golang.org/doc/install)
 * [Python3 with virtualenv](https://wiki.python.org/moin/BeginnersGuide/Download)
 * Make (Mac: install [XCode](https://itunes.apple.com/us/app/xcode/id497799835))
 * [Docker](https://docs.docker.com/install/) including the
   [post-install steps](https://docs.docker.com/install/linux/linux-postinstall/).

Optional Software

 * [Google Cloud Platform](gcloud.md)
 * [Visual Studio Code](https://code.visualstudio.com/Download) for IDE.
   Vim and Emacs work to.
 * [VirtualBox](https://www.virtualbox.org/wiki/Downloads) recommended for
@@ -27,8 +25,7 @@ running:

```bash
sudo apt-get update
sudo apt-get install -y -q python3 python3-virtualenv virtualenv make \
  google-cloud-sdk git unzip tar
sudo apt-get install -y -q make google-cloud-sdk git unzip tar
```

*It's recommended that you install Go using their instructions because package
@@ -51,13 +48,11 @@ make
[create a fork](https://help.github.com/en/articles/fork-a-repo) and use that
but for purpose of this guide we'll be using the upstream/master.*

## Building
## Building code and images

```bash
# Reset workspace
make clean
# Compile all the binaries
make all -j$(nproc)
# Run tests
make test
# Build all the images.
@@ -87,11 +82,9 @@ default context the Makefile will honor that._
# GKE cluster: make create-gke-cluster/delete-gke-cluster
# or create a local Minikube cluster
make create-gke-cluster
# Step 2: Download helm and install Tiller in the cluster
make push-helm
# Step 3: Build and Push Open Match Images to gcr.io
# Step 2: Build and Push Open Match Images to gcr.io
make push-images -j$(nproc)
# Install Open Match in the cluster.
# Step 3: Install Open Match in the cluster.
make install-chart

# Create a proxy to Open Match pods so that you can access them locally.
@@ -105,12 +98,29 @@ make proxy
make delete-chart
```

## Interaction
## Iterating
While iterating on the project, you may need to:
1. Install/Run everything
2. Make some code changes
3. Make sure the changes compile by running `make test`
4. Build and push Docker images to your personal registry by running `make push-images -j$(nproc)`
5. Deploy the code change by running `make install-chart`
6. Verify it's working by [looking at the logs](#accessing-logs) or looking at the monitoring dashboard by running `make proxy-grafana`
7. Tear down Open Match by running `make delete-chart`

Before integrating with Open Match you can manually interact with it to get a feel for how it works.
## Accessing logs
To look at Open Match core services' logs, run:
```bash
# Replace om-frontend with the service name that you would like to access
kubectl logs -n open-match svc/om-frontend
```

`make proxy-ui` exposes the Swagger UI for Open Match locally on your computer.
You can then go to http://localhost:51500 and view the API as well as interactively call Open Match.
## API References
While integrating with Open Match you may want to understand its API surface concepts or interact with it and get a feel for how it works.

The APIs are defined in `proto` format under the `api/` folder, with references available at [open-match.dev](https://open-match.dev/site/docs/reference/api/).

You can also run `make proxy-ui` to exposes the Swagger UI for Open Match locally on your computer after [deploying it to Kubernetes](#deploying-to-kubernetes), then go to http://localhost:51500 and view the REST APIs as well as interactively call Open Match.

By default you will be talking to the frontend server but you can change the target API url to any of the following:

@@ -144,55 +154,9 @@ export GOPATH=$HOME/workspace/

## Pull Requests

If you want to submit a Pull Request there's some tools to help prepare your
change.

```bash
# Runs code generators, tests, and linters.
make presubmit
```

`make presubmit` catches most of the issues your change can run into. If the
submit checks fail you can run it locally via,

```bash
make local-cloud-build
```
If you want to submit a Pull Request, `make presubmit` can catch most of the issues your change can run into.

Our [continuous integration](https://console.cloud.google.com/cloud-build/builds?project=open-match-build)
runs against all PRs. In order to see your build results you'll need to
become a member of
[open-match-discuss@googlegroups.com](https://groups.google.com/forum/#!forum/open-match-discuss).

## Makefile

The Makefile is the core of Open Match's build process. There's a lot of
commands but here's a list of the important ones and patterns to remember them.

```bash
# Help
make

# Reset workspace (delete all build artifacts)
make clean
# Delete auto-generated protobuf code and swagger API docs.
make clean-protos clean-swagger-docs
# make clean-* deletes some part of the build outputs.

# Build all Docker images
make build-images
# Build frontend docker image.
make build-frontend-image

# Formats, Vets, and tests the codebase.
make fmt vet test
# Same as above also regenerates autogen files.
make presubmit

# Run website on http://localhost:8080
make run-site

# Proxy all Open Match processes to view them.
make proxy
```
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ticketId = resp.Ticket.Id
|
||||
ticketId = resp.Id
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
@ -111,11 +111,11 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
|
||||
|
||||
var assignment *pb.Assignment
|
||||
{
|
||||
req := &pb.GetAssignmentsRequest{
|
||||
req := &pb.WatchAssignmentsRequest{
|
||||
TicketId: ticketId,
|
||||
}
|
||||
|
||||
stream, err := fe.GetAssignments(ctx, req)
|
||||
stream, err := fe.WatchAssignments(ctx, req)
|
||||
for assignment.GetConnection() == "" {
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
|
@ -131,9 +131,13 @@ func run(ds *components.DemoShared) {
|
||||
}
|
||||
|
||||
req := &pb.AssignTicketsRequest{
|
||||
TicketIds: ids,
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: ids,
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
examples/scale/README.md (new file, 20 lines)
@@ -0,0 +1,20 @@
## How to use this framework

This is the framework that we use to benchmark Open Match against different matchmaking scenarios. For now (02/24/2020), this framework supports a Battle Royale, a Basic 1v1 matchmaking, and a Team Shooter scenario. You are welcome to write up your own `Scenario`, test it, and share the number that you are able to get to us.

1. The `Scenario` struct under the `scenarios/scenarios.go` file defines the parameters that this framework currently support/plan to support.
2. Each subpackage `battleroyal`, `firstmatch`, and `teamshooter` implements to `GameScenario` interface defined under `scenarios/scenarios.go` file. Feel free to write your own benchmark scenario by implementing the interface.
  - Ticket `func() *pb.Ticket` - Tickets generator
  - Profiles `func() []*pb.MatchProfile` - Profiles generator
  - MMF `MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)` - Custom matchmaking logic using a MatchProfile and a map struct that contains the mapping from pool name to the tickets of that pool.
  - Evaluate `Evaluate(stream pb.Evaluator_EvaluateServer) error` - Custom logic implementation of the evaluator.

Follow the instructions below if you want to use any of the existing benchmarking scenarios.

1. Open the `scenarios.go` file under the scenarios directory.
2. Change the value of the `ActiveScenario` variable to the scenario that you would like Open Match to run against.
3. Make sure you have `kubectl` connected to an existing Kubernetes cluster and run `make push-images` followed by `make install-scale-chart` to push the images and install Open Match core along with the scale components in the cluster.
4. Run `make proxy`
  - Open `localhost:3000` to see the Grafana dashboards.
  - Open `localhost:9090` to see the Prometheus query server.
  - Open `localhost:[COMPONENT_HTTP_ENDPOINT]/help` to see how to access the zpages.
@@ -25,6 +25,7 @@ import (
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
	"open-match.dev/open-match/examples/scale/scenarios"
	"open-match.dev/open-match/internal/appmain"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/rpc"
	"open-match.dev/open-match/internal/telemetry"
@@ -38,7 +39,6 @@ var (
	})

	activeScenario = scenarios.ActiveScenario
	statProcessor  = scenarios.NewStatProcessor()

	mIterations      = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
	mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
@@ -54,8 +54,8 @@ var (

// Run triggers execution of functions that continuously fetch, assign and
// delete matches.
func BindService(p *rpc.ServerParams, cfg config.View) error {
	go run(cfg)
func BindService(p *appmain.Params, b *appmain.Bindings) error {
	go run(p.Config())
	return nil
}

@@ -76,8 +76,6 @@ func run(cfg config.View) {
	defer feConn.Close()
	fe := pb.NewFrontendServiceClient(feConn)

	startTime := time.Now()

	w := logger.Writer()
	defer w.Close()

@@ -94,7 +92,6 @@ func run(cfg config.View) {
	for range time.Tick(time.Millisecond * 250) {
		// Keep pulling matches from Open Match backend
		profiles := activeScenario.Profiles()
		statProcessor.SetStat("TotalProfiles", len(profiles))
		var wg sync.WaitGroup

		for _, p := range profiles {
@@ -107,9 +104,7 @@ func run(cfg config.View) {

		// Wait for all profiles to complete before proceeding.
		wg.Wait()
		statProcessor.SetStat("TimeElapsed", time.Since(startTime).String())
		telemetry.RecordUnitMeasurement(context.Background(), mIterations)
		statProcessor.Log(w)
	}
}

@@ -130,7 +125,7 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA
	stream, err := be.FetchMatches(ctx, req)
	if err != nil {
		telemetry.RecordUnitMeasurement(ctx, mFetchMatchErrors)
		statProcessor.RecordError("failed to get available stream client", err)
		logger.WithError(err).Error("failed to get available stream client")
		return
	}

@@ -144,13 +139,12 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA

		if err != nil {
			telemetry.RecordUnitMeasurement(ctx, mFetchMatchErrors)
			statProcessor.RecordError("failed to get matches from stream client", err)
			logger.WithError(err).Error("failed to get matches from stream client")
			return
		}

		telemetry.RecordNUnitMeasurement(ctx, mSumTicketsReturned, int64(len(resp.GetMatch().Tickets)))
		telemetry.RecordUnitMeasurement(ctx, mMatchesReturned)
		statProcessor.IncrementStat("MatchCount", 1)

		matchesForAssignment <- resp.GetMatch()
	}
@@ -167,19 +161,22 @@ func runAssignments(be pb.BackendServiceClient, matchesForAssignment <-chan *pb.

		if activeScenario.BackendAssignsTickets {
			_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
				TicketIds: ids,
				Assignment: &pb.Assignment{
					Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds: ids,
						Assignment: &pb.Assignment{
							Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
						},
					},
				},
			})
			if err != nil {
				telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
				statProcessor.RecordError("failed to assign tickets", err)
				logger.WithError(err).Error("failed to assign tickets")
				continue
			}

			telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
			statProcessor.IncrementStat("Assigned", len(ids))
		}

		for _, id := range ids {
@@ -201,10 +198,9 @@ func runDeletions(fe pb.FrontendServiceClient, ticketsForDeletion <-chan string)

		if err == nil {
			telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
			statProcessor.IncrementStat("Deleted", 1)
		} else {
			telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
			statProcessor.RecordError("failed to delete tickets", err)
			logger.WithError(err).Error("failed to delete tickets")
		}
	}
}
@@ -16,13 +16,15 @@ package frontend

import (
	"context"
	"math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/sirupsen/logrus"
	"go.opencensus.io/stats"
	"go.opencensus.io/trace"
	"open-match.dev/open-match/examples/scale/scenarios"
	"open-match.dev/open-match/internal/appmain"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/rpc"
	"open-match.dev/open-match/internal/telemetry"
@@ -34,20 +36,18 @@ var (
		"app":       "openmatch",
		"component": "scale.frontend",
	})
	activeScenario     = scenarios.ActiveScenario
	statProcessor      = scenarios.NewStatProcessor()
	numOfRoutineCreate = 8

	totalCreated uint32
	activeScenario = scenarios.ActiveScenario

	mTicketsCreated        = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
	mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "tickets created")
	mRunnersWaiting        = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
	mRunnersCreating       = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
)

// Run triggers execution of the scale frontend component that creates
// tickets at scale in Open Match.
func BindService(p *rpc.ServerParams, cfg config.View) error {
	go run(cfg)
func BindService(p *appmain.Params, b *appmain.Bindings) error {
	go run(p.Config())

	return nil
}
@@ -61,75 +61,92 @@ func run(cfg config.View) {
	}
	fe := pb.NewFrontendServiceClient(conn)

	w := logger.Writer()
	defer w.Close()

	ticketQPS := int(activeScenario.FrontendTicketCreatedQPS)
	ticketTotal := activeScenario.FrontendTotalTicketsToCreate

	for {
		currentCreated := int(atomic.LoadUint32(&totalCreated))
		if ticketTotal != -1 && currentCreated >= ticketTotal {
			break
		}
	totalCreated := 0

		// Each inner loop creates TicketCreatedQPS tickets
		var ticketPerRoutine, ticketModRoutine int
		start := time.Now()

		if ticketTotal == -1 || currentCreated+ticketQPS <= ticketTotal {
			ticketPerRoutine = ticketQPS / numOfRoutineCreate
			ticketModRoutine = ticketQPS % numOfRoutineCreate
		} else {
			ticketPerRoutine = (ticketTotal - currentCreated) / numOfRoutineCreate
			ticketModRoutine = (ticketTotal - currentCreated) % numOfRoutineCreate
		}

		var wg sync.WaitGroup
		for i := 0; i < numOfRoutineCreate; i++ {
			wg.Add(1)
			if i < ticketModRoutine {
				go createPerCycle(&wg, fe, ticketPerRoutine+1, start)
			} else {
				go createPerCycle(&wg, fe, ticketPerRoutine, start)
	for range time.Tick(time.Second) {
		for i := 0; i < ticketQPS; i++ {
			if ticketTotal == -1 || totalCreated < ticketTotal {
				go runner(fe)
			}
		}

		// Wait for all concurrent creates to complete.
		wg.Wait()
		statProcessor.SetStat("TotalCreated", atomic.LoadUint32(&totalCreated))
		statProcessor.Log(w)
	}
}

func createPerCycle(wg *sync.WaitGroup, fe pb.FrontendServiceClient, ticketPerRoutine int, start time.Time) {
	defer wg.Done()
	cycleCreated := 0
func runner(fe pb.FrontendServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for j := 0; j < ticketPerRoutine; j++ {
		req := &pb.CreateTicketRequest{
			Ticket: activeScenario.Ticket(),
		}
	g := stateGauge{}
	defer g.stop()

		ctx, span := trace.StartSpan(context.Background(), "scale.frontend/CreateTicket")
		defer span.End()
	g.start(mRunnersWaiting)
	// A random sleep at the start of the worker evens calls out over the second
	// period, and makes timing between ticket creation calls a more realistic
	// poisson distribution.
	time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))

		timeLeft := start.Add(time.Second).Sub(time.Now())
		if timeLeft <= 0 {
			break
		}
		ticketsLeft := ticketPerRoutine - cycleCreated

		time.Sleep(timeLeft / time.Duration(ticketsLeft))

		if _, err := fe.CreateTicket(ctx, req); err == nil {
			cycleCreated++
			telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
		} else {
			statProcessor.RecordError("failed to create a ticket", err)
			telemetry.RecordUnitMeasurement(ctx, mTicketCreationsFailed)
		}
	g.start(mRunnersCreating)
	id, err := createTicket(ctx, fe)
	if err != nil {
		logger.WithError(err).Error("failed to create a ticket")
		return
	}

	atomic.AddUint32(&totalCreated, uint32(cycleCreated))
	_ = id
}

func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, error) {
	ctx, span := trace.StartSpan(ctx, "scale.frontend/CreateTicket")
	defer span.End()

	req := &pb.CreateTicketRequest{
		Ticket: activeScenario.Ticket(),
	}

	resp, err := fe.CreateTicket(ctx, req)
	if err != nil {
		telemetry.RecordUnitMeasurement(ctx, mTicketCreationsFailed)
		return "", err
	}

	telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
	return resp.Id, nil
}

// Allows concurrent moficiation of a gauge value by modifying the concurrent
// value with a delta.
func concurrentGauge(s *stats.Int64Measure) func(delta int64) {
	m := sync.Mutex{}
	v := int64(0)
	return func(delta int64) {
		m.Lock()
		defer m.Unlock()

		v += delta
		telemetry.SetGauge(context.Background(), s, v)
	}
}

// stateGauge will have a single value be applied to one gauge at a time.
type stateGauge struct {
	f func(int64)
}

// start begins a stage measured in a gauge, stopping any previously started
// stage.
func (g *stateGauge) start(f func(int64)) {
	g.stop()
	g.f = f
	f(1)
}

// stop finishes the current stage by decrementing the gauge.
func (g *stateGauge) stop() {
	if g.f != nil {
		g.f(-1)
		g.f = nil
	}
}
@@ -1,85 +0,0 @@
package scenarios

import (
	"fmt"
	"math/rand"
	"time"

	"open-match.dev/open-match/pkg/pb"
)

const (
	battleRoyalRegions = 20
	regionArg          = "region"
)

var (
	battleRoyalScenario = &Scenario{
		MMF:                          queryPoolsWrapper(battleRoyalMmf),
		Evaluator:                    fifoEvaluate,
		FrontendTotalTicketsToCreate: -1,
		FrontendTicketCreatedQPS:     100,
		BackendAssignsTickets:        true,
		BackendDeletesTickets:        true,
		Ticket:                       battleRoyalTicket,
		Profiles:                     battleRoyalProfile,
	}
)

func battleRoyalProfile() []*pb.MatchProfile {
	p := []*pb.MatchProfile{}

	for i := 0; i < battleRoyalRegions; i++ {
		p = append(p, &pb.MatchProfile{
			Name: battleRoyalRegionName(i),
			Pools: []*pb.Pool{
				{
					Name: poolName,
					StringEqualsFilters: []*pb.StringEqualsFilter{
						{
							StringArg: regionArg,
							Value:     battleRoyalRegionName(i),
						},
					},
				},
			},
		})
	}
	return p
}

func battleRoyalTicket() *pb.Ticket {
	// Simple way to give an uneven distribution of region population.
	a := rand.Intn(battleRoyalRegions) + 1
	r := rand.Intn(a)

	return &pb.Ticket{
		SearchFields: &pb.SearchFields{
			StringArgs: map[string]string{
				regionArg: battleRoyalRegionName(r),
			},
		},
	}
}

func battleRoyalMmf(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
	const playersInMatch = 100

	tickets := poolTickets[poolName]
	var matches []*pb.Match

	for i := 0; i+playersInMatch <= len(tickets); i += playersInMatch {
		matches = append(matches, &pb.Match{
			MatchId:       fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
			Tickets:       tickets[i : i+playersInMatch],
			MatchProfile:  p.GetName(),
			MatchFunction: "battleRoyal",
		})
	}

	return matches, nil
}

func battleRoyalRegionName(i int) string {
	return fmt.Sprintf("region_%d", i)
}
141
examples/scale/scenarios/battleroyal/battleroyal.go
Normal file
141
examples/scale/scenarios/battleroyal/battleroyal.go
Normal file
@ -0,0 +1,141 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package battleroyal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
const (
|
||||
poolName = "all"
|
||||
regionArg = "region"
|
||||
)
|
||||
|
||||
func battleRoyalRegionName(i int) string {
|
||||
return fmt.Sprintf("region_%d", i)
|
||||
}
|
||||
|
||||
func Scenario() *BattleRoyalScenario {
|
||||
return &BattleRoyalScenario{
|
||||
regions: 20,
|
||||
}
|
||||
}
|
||||
|
||||
type BattleRoyalScenario struct {
|
||||
regions int
|
||||
}
|
||||
|
||||
func (b *BattleRoyalScenario) Profiles() []*pb.MatchProfile {
|
||||
p := []*pb.MatchProfile{}
|
||||
|
||||
for i := 0; i < b.regions; i++ {
|
||||
p = append(p, &pb.MatchProfile{
|
||||
Name: battleRoyalRegionName(i),
|
||||
Pools: []*pb.Pool{
|
||||
{
|
||||
Name: poolName,
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{
|
||||
{
|
||||
StringArg: regionArg,
|
||||
Value: battleRoyalRegionName(i),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (b *BattleRoyalScenario) Ticket() *pb.Ticket {
|
||||
// Simple way to give an uneven distribution of region population.
|
||||
a := rand.Intn(b.regions) + 1
|
||||
r := rand.Intn(a)
|
||||
|
||||
return &pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
StringArgs: map[string]string{
|
||||
regionArg: battleRoyalRegionName(r),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
const playersInMatch = 100
|
||||
|
||||
tickets := poolTickets[poolName]
|
||||
var matches []*pb.Match
|
||||
|
||||
for i := 0; i+playersInMatch <= len(tickets); i += playersInMatch {
|
||||
matches = append(matches, &pb.Match{
|
||||
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
|
||||
Tickets: tickets[i : i+playersInMatch],
|
||||
MatchProfile: p.GetName(),
|
||||
MatchFunction: "battleRoyal",
|
||||
})
|
||||
}
|
||||
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
|
||||
// previously accepted match. Essentially first to claim the ticket wins.
|
||||
func (b *BattleRoyalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
used := map[string]struct{}{}
|
||||
|
||||
// TODO: once the evaluator client supports sending and receiving at the
|
||||
// same time, don't buffer, just send results immediately.
|
||||
matchIDs := []string{}
|
||||
|
||||
outer:
|
||||
for {
|
||||
req, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading evaluator input stream: %w", err)
|
||||
}
|
||||
|
||||
m := req.GetMatch()
|
||||
|
||||
for _, t := range m.Tickets {
|
||||
if _, ok := used[t.Id]; ok {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range m.Tickets {
|
||||
used[t.Id] = struct{}{}
|
||||
}
|
||||
|
||||
matchIDs = append(matchIDs, m.GetMatchId())
|
||||
}
|
||||
|
||||
for _, mID := range matchIDs {
|
||||
err := stream.Send(&pb.EvaluateResponse{MatchId: mID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error sending evaluator output stream: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
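The "Simple way to give an uneven distribution of region population" comment in Ticket() above is easy to check empirically. The standalone sketch below is not part of this change and its constants are illustrative; it samples regions the same way and prints how often each one is chosen, showing that lower-numbered regions end up far more populated than higher-numbered ones.

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const regions = 20 // mirrors BattleRoyalScenario{regions: 20}
	counts := make([]int, regions)
	for i := 0; i < 100000; i++ {
		// Same two-step draw as Ticket(): pick an upper bound a in [1, regions],
		// then a region r in [0, a). Small indices are eligible in every draw,
		// large indices only rarely, which skews population toward region_0.
		a := rand.Intn(regions) + 1
		r := rand.Intn(a)
		counts[r]++
	}
	for i, c := range counts {
		fmt.Printf("region_%d: %d\n", i, c)
	}
}
```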
42
examples/scale/scenarios/first_match_scenario.go → examples/scale/scenarios/firstmatch/firstmatch.go
@ -1,4 +1,18 @@
|
||||
package scenarios
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firstmatch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -12,20 +26,14 @@ const (
|
||||
poolName = "all"
|
||||
)
|
||||
|
||||
var (
|
||||
firstMatchScenario = &Scenario{
|
||||
MMF: queryPoolsWrapper(firstMatchMmf),
|
||||
Evaluator: fifoEvaluate,
|
||||
FrontendTotalTicketsToCreate: -1,
|
||||
FrontendTicketCreatedQPS: 100,
|
||||
BackendAssignsTickets: true,
|
||||
BackendDeletesTickets: true,
|
||||
Ticket: firstMatchTicket,
|
||||
Profiles: firstMatchProfile,
|
||||
}
|
||||
)
|
||||
func Scenario() *FirstMatchScenario {
|
||||
return &FirstMatchScenario{}
|
||||
}
|
||||
|
||||
func firstMatchProfile() []*pb.MatchProfile {
|
||||
type FirstMatchScenario struct {
|
||||
}
|
||||
|
||||
func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
|
||||
return []*pb.MatchProfile{
|
||||
{
|
||||
Name: "entirePool",
|
||||
@ -38,11 +46,11 @@ func firstMatchProfile() []*pb.MatchProfile {
|
||||
}
|
||||
}
|
||||
|
||||
func firstMatchTicket() *pb.Ticket {
|
||||
func (_ *FirstMatchScenario) Ticket() *pb.Ticket {
|
||||
return &pb.Ticket{}
|
||||
}
|
||||
|
||||
func firstMatchMmf(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
tickets := poolTickets[poolName]
|
||||
var matches []*pb.Match
|
||||
|
||||
@ -60,7 +68,7 @@ func firstMatchMmf(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*
|
||||
|
||||
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
|
||||
// previously accepted match. Essentially first to claim the ticket wins.
|
||||
func fifoEvaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
func (_ *FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
used := map[string]struct{}{}
|
||||
|
||||
// TODO: once the evaluator client supports sending and receiving at the
|
@ -14,10 +14,65 @@
|
||||
|
||||
package scenarios
|
||||
|
||||
import "open-match.dev/open-match/pkg/pb"
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/examples/scale/scenarios/battleroyal"
|
||||
"open-match.dev/open-match/examples/scale/scenarios/firstmatch"
|
||||
"open-match.dev/open-match/examples/scale/scenarios/teamshooter"
|
||||
"open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/matchfunction"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
queryServiceAddress = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
|
||||
|
||||
logger = logrus.WithFields(logrus.Fields{
|
||||
"app": "scale",
|
||||
})
|
||||
)
|
||||
|
||||
// GameScenario defines what tickets look like, and how they should be matched.
|
||||
type GameScenario interface {
|
||||
// Ticket creates a new ticket, with randomized parameters.
|
||||
Ticket() *pb.Ticket
|
||||
|
||||
// Profiles lists all of the profiles that should run.
|
||||
Profiles() []*pb.MatchProfile
|
||||
|
||||
// MatchFunction is the custom logic implementation of the match function.
|
||||
MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
|
||||
|
||||
// Evaluate is the custom logic implementation of the evaluator.
|
||||
Evaluate(stream pb.Evaluator_EvaluateServer) error
|
||||
}
|
||||
|
||||
// ActiveScenario sets the scenario with preset parameters that we want to use for current Open Match benchmark run.
|
||||
var ActiveScenario = battleRoyalScenario
|
||||
var ActiveScenario = func() *Scenario {
|
||||
var gs GameScenario = firstmatch.Scenario()
|
||||
|
||||
// TODO: Select which scenario to use based on some configuration or choice,
|
||||
// so it's easier to run different scenarios without changing code.
|
||||
gs = battleroyal.Scenario()
|
||||
gs = teamshooter.Scenario()
|
||||
|
||||
return &Scenario{
|
||||
FrontendTotalTicketsToCreate: -1,
|
||||
FrontendTicketCreatedQPS: 100,
|
||||
|
||||
BackendAssignsTickets: true,
|
||||
BackendDeletesTickets: true,
|
||||
|
||||
Ticket: gs.Ticket,
|
||||
Profiles: gs.Profiles,
|
||||
|
||||
MMF: queryPoolsWrapper(gs.MatchFunction),
|
||||
Evaluator: gs.Evaluate,
|
||||
}
|
||||
}()
|
||||
|
||||
// Scenario defines the controllable fields for Open Match benchmark scenarios
|
||||
type Scenario struct {
|
||||
@ -58,3 +113,44 @@ func (mmf matchFunction) Run(req *pb.RunRequest, srv pb.MatchFunction_RunServer)
|
||||
func (eval evaluatorFunction) Evaluate(srv pb.Evaluator_EvaluateServer) error {
|
||||
return eval(srv)
|
||||
}
|
||||
|
||||
func getQueryServiceGRPCClient() pb.QueryServiceClient {
|
||||
conn, err := grpc.Dial(queryServiceAddress, testing.NewGRPCDialOptions(logger)...)
|
||||
if err != nil {
|
||||
logger.Fatalf("Failed to connect to Open Match, got %v", err)
|
||||
}
|
||||
return pb.NewQueryServiceClient(conn)
|
||||
}
|
||||
|
||||
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
|
||||
var q pb.QueryServiceClient
|
||||
var startQ sync.Once
|
||||
|
||||
return func(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
|
||||
startQ.Do(func() {
|
||||
q = getQueryServiceGRPCClient()
|
||||
})
|
||||
|
||||
poolTickets, err := matchfunction.QueryPools(stream.Context(), q, req.GetProfile().GetPools())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
proposals, err := mmf(req.GetProfile(), poolTickets)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.WithFields(logrus.Fields{
|
||||
"proposals": proposals,
|
||||
}).Trace("proposals returned by match function")
|
||||
|
||||
for _, proposal := range proposals {
|
||||
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
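The GameScenario interface above is the seam that every scenario package (firstmatch, battleroyal, teamshooter) now implements, and ActiveScenario simply wraps one implementation with the shared frontend/backend knobs plus the queryPoolsWrapper plumbing. As a minimal sketch of what a new scenario has to provide, the hypothetical noop implementation below (not part of this change; the package and type names are invented for illustration) satisfies the interface with the simplest possible behavior.

```go
package noop

import (
	"io"

	"open-match.dev/open-match/pkg/pb"
)

// Scenario returns a hypothetical GameScenario that creates empty tickets,
// never proposes matches, and accepts nothing during evaluation. It exists
// only to illustrate the interface; it is not part of the repository.
func Scenario() *NoopScenario { return &NoopScenario{} }

type NoopScenario struct{}

// Ticket creates a ticket with no search fields.
func (*NoopScenario) Ticket() *pb.Ticket { return &pb.Ticket{} }

// Profiles lists a single profile whose one pool matches every ticket.
func (*NoopScenario) Profiles() []*pb.MatchProfile {
	return []*pb.MatchProfile{
		{Name: "everything", Pools: []*pb.Pool{{Name: "all"}}},
	}
}

// MatchFunction proposes no matches for any profile.
func (*NoopScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
	return nil, nil
}

// Evaluate drains the proposal stream and accepts none of the matches.
func (*NoopScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}
```

Swapping `gs = teamshooter.Scenario()` for a call like `noop.Scenario()` in ActiveScenario would then exercise the scale harness end to end without ever producing assignments.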
330
examples/scale/scenarios/teamshooter/teamshooter.go
Normal file
@ -0,0 +1,330 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// TeamShooterScenario is a scenario which is designed to emulate the
|
||||
// approximate behavior to open match that a skill based team game would have.
|
||||
// It doesn't try to provide good matchmaking for real players. There are three
|
||||
// arguments used:
|
||||
// mode: The game mode the players wants to play in. mode is a hard partition.
|
||||
// regions: Players may have good latency to one or more regions. A player will
|
||||
// search for matches in all eligible regions.
|
||||
// skill: Players have a random skill based on a normal distribution. Players
|
||||
// will only be matched with other players who have a close skill value. The
|
||||
// match functions have overlapping partitions of the skill brackets.
|
||||
package teamshooter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
const (
|
||||
poolName = "all"
|
||||
skillArg = "skill"
|
||||
modeArg = "mode"
|
||||
)
|
||||
|
||||
// TeamShooterScenario provides the required methods for running a scenario.
|
||||
type TeamShooterScenario struct {
|
||||
// Names of available region tags.
|
||||
regions []string
|
||||
// Maximum regions a player can search in.
|
||||
maxRegions int
|
||||
// Number of tickets which form a match.
|
||||
playersPerGame int
|
||||
// For each pair of consecutive values, the value to split profiles on by
|
||||
// skill.
|
||||
skillBoundaries []float64
|
||||
// Maximum difference between two tickets to consider a match valid.
|
||||
maxSkillDifference float64
|
||||
// List of mode names.
|
||||
modes []string
|
||||
// Returns a random mode, with some weight.
|
||||
randomMode func() string
|
||||
}
|
||||
|
||||
// Scenario creates a new TeamShooterScenario.
|
||||
func Scenario() *TeamShooterScenario {
|
||||
|
||||
modes, randomMode := weightedChoice(map[string]int{
|
||||
"pl": 100, // Payload, very popular.
|
||||
"cp": 25, // Capture point, 1/4 as popular.
|
||||
})
|
||||
|
||||
regions := []string{}
|
||||
for i := 0; i < 2; i++ {
|
||||
regions = append(regions, fmt.Sprintf("region_%d", i))
|
||||
}
|
||||
|
||||
return &TeamShooterScenario{
|
||||
regions: regions,
|
||||
maxRegions: 1,
|
||||
playersPerGame: 12,
|
||||
skillBoundaries: []float64{math.Inf(-1), 0, math.Inf(1)},
|
||||
maxSkillDifference: 0.01,
|
||||
modes: modes,
|
||||
randomMode: randomMode,
|
||||
}
|
||||
}
|
||||
|
||||
// Profiles shards the player base on mode, region, and skill.
|
||||
func (t *TeamShooterScenario) Profiles() []*pb.MatchProfile {
|
||||
p := []*pb.MatchProfile{}
|
||||
|
||||
for _, region := range t.regions {
|
||||
for _, mode := range t.modes {
|
||||
for i := 0; i+1 < len(t.skillBoundaries); i++ {
|
||||
skillMin := t.skillBoundaries[i] - t.maxSkillDifference/2
|
||||
skillMax := t.skillBoundaries[i+1] + t.maxSkillDifference/2
|
||||
p = append(p, &pb.MatchProfile{
|
||||
Name: fmt.Sprintf("%s_%s_%v-%v", region, mode, skillMin, skillMax),
|
||||
Pools: []*pb.Pool{
|
||||
{
|
||||
Name: poolName,
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{
|
||||
{
|
||||
DoubleArg: skillArg,
|
||||
Min: skillMin,
|
||||
Max: skillMax,
|
||||
},
|
||||
},
|
||||
TagPresentFilters: []*pb.TagPresentFilter{
|
||||
{
|
||||
Tag: region,
|
||||
},
|
||||
},
|
||||
StringEqualsFilters: []*pb.StringEqualsFilter{
|
||||
{
|
||||
StringArg: modeArg,
|
||||
Value: mode,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// Ticket creates a randomized player.
|
||||
func (t *TeamShooterScenario) Ticket() *pb.Ticket {
|
||||
region := rand.Intn(len(t.regions))
|
||||
numRegions := rand.Intn(t.maxRegions) + 1
|
||||
|
||||
tags := []string{}
|
||||
for i := 0; i < numRegions; i++ {
|
||||
tags = append(tags, t.regions[region])
|
||||
// The Earth is actually a circle.
|
||||
region = (region + 1) % len(t.regions)
|
||||
}
|
||||
|
||||
return &pb.Ticket{
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
skillArg: clamp(rand.NormFloat64(), -3, 3),
|
||||
},
|
||||
StringArgs: map[string]string{
|
||||
modeArg: t.randomMode(),
|
||||
},
|
||||
Tags: tags,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// MatchFunction puts tickets into matches based on their skill, finding the
|
||||
// required number of tickets for a game within the maximum skill difference.
|
||||
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
|
||||
skill := func(t *pb.Ticket) float64 {
|
||||
return t.SearchFields.DoubleArgs[skillArg]
|
||||
}
|
||||
|
||||
tickets := poolTickets[poolName]
|
||||
var matches []*pb.Match
|
||||
|
||||
sort.Slice(tickets, func(i, j int) bool {
|
||||
return skill(tickets[i]) < skill(tickets[j])
|
||||
})
|
||||
|
||||
for i := 0; i+t.playersPerGame <= len(tickets); i++ {
|
||||
mt := tickets[i : i+t.playersPerGame]
|
||||
if skill(mt[len(mt)-1])-skill(mt[0]) < t.maxSkillDifference {
|
||||
avg := float64(0)
|
||||
for _, t := range mt {
|
||||
avg += skill(t)
|
||||
}
|
||||
avg /= float64(len(mt))
|
||||
|
||||
q := float64(0)
|
||||
for _, t := range mt {
|
||||
diff := skill(t) - avg
|
||||
q -= diff * diff
|
||||
}
|
||||
|
||||
m, err := (&matchExt{
|
||||
id: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
|
||||
matchProfile: p.GetName(),
|
||||
matchFunction: "skillmatcher",
|
||||
tickets: mt,
|
||||
quality: q,
|
||||
}).pack()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matches = append(matches, m)
|
||||
}
|
||||
}
|
||||
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
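The quality computed in MatchFunction above is the negative sum of squared skill deviations from the match average, so a tighter skill spread yields a value closer to zero and wins the descending sort in Evaluate below. The small standalone check that follows uses invented skill values purely for illustration.

```go
package main

import "fmt"

// quality mirrors the calculation in MatchFunction: the negative sum of
// squared deviations from the mean skill, so 0 is perfect and more negative
// is worse.
func quality(skills []float64) float64 {
	avg := 0.0
	for _, s := range skills {
		avg += s
	}
	avg /= float64(len(skills))

	q := 0.0
	for _, s := range skills {
		d := s - avg
		q -= d * d
	}
	return q
}

func main() {
	tight := []float64{0.50, 0.51, 0.52}
	loose := []float64{0.40, 0.50, 0.60}
	// The tighter group has the higher (less negative) quality and would be
	// accepted first by the evaluator's descending sort.
	fmt.Println(quality(tight) > quality(loose)) // true
}
```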
// Evaluate returns matches in order of highest quality, skipping any matches
|
||||
// which contain tickets that are already used.
|
||||
func (t *TeamShooterScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
// Unpacked proposal matches.
|
||||
proposals := []*matchExt{}
|
||||
// Ticket ids which are used in a match.
|
||||
used := map[string]struct{}{}
|
||||
|
||||
for {
|
||||
req, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading evaluator input stream: %w", err)
|
||||
}
|
||||
|
||||
p, err := unpackMatch(req.GetMatch())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposals = append(proposals, p)
|
||||
}
|
||||
|
||||
// Higher quality is better.
|
||||
sort.Slice(proposals, func(i, j int) bool {
|
||||
return proposals[i].quality > proposals[j].quality
|
||||
})
|
||||
|
||||
outer:
|
||||
for _, p := range proposals {
|
||||
for _, t := range p.tickets {
|
||||
if _, ok := used[t.Id]; ok {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range p.tickets {
|
||||
used[t.Id] = struct{}{}
|
||||
}
|
||||
|
||||
err := stream.Send(&pb.EvaluateResponse{MatchId: p.id})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error sending evaluator output stream: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// matchExt presents the match and extension data in a native form, and allows
|
||||
// easy conversion to and from proto format.
|
||||
type matchExt struct {
|
||||
id string
|
||||
tickets []*pb.Ticket
|
||||
quality float64
|
||||
matchProfile string
|
||||
matchFunction string
|
||||
}
|
||||
|
||||
func unpackMatch(m *pb.Match) (*matchExt, error) {
|
||||
v := &wrappers.DoubleValue{}
|
||||
|
||||
err := ptypes.UnmarshalAny(m.Extensions["quality"], v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error unpacking match quality: %w", err)
|
||||
}
|
||||
|
||||
return &matchExt{
|
||||
id: m.MatchId,
|
||||
tickets: m.Tickets,
|
||||
quality: v.Value,
|
||||
matchProfile: m.MatchProfile,
|
||||
matchFunction: m.MatchFunction,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *matchExt) pack() (*pb.Match, error) {
|
||||
v := &wrappers.DoubleValue{Value: m.quality}
|
||||
|
||||
a, err := ptypes.MarshalAny(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error packing match quality: %w", err)
|
||||
}
|
||||
|
||||
return &pb.Match{
|
||||
MatchId: m.id,
|
||||
Tickets: m.tickets,
|
||||
MatchProfile: m.matchProfile,
|
||||
MatchFunction: m.matchFunction,
|
||||
Extensions: map[string]*any.Any{
|
||||
"quality": a,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func clamp(v float64, min float64, max float64) float64 {
|
||||
if v < min {
|
||||
return min
|
||||
}
|
||||
if v > max {
|
||||
return max
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// weightedChoice takes a map of values, and their relative probability. It
|
||||
// returns a list of the values, along with a function which will return random
|
||||
// choices from the values with the weighted probability.
|
||||
func weightedChoice(m map[string]int) ([]string, func() string) {
|
||||
s := make([]string, 0, len(m))
|
||||
total := 0
|
||||
for k, v := range m {
|
||||
s = append(s, k)
|
||||
total += v
|
||||
}
|
||||
|
||||
return s, func() string {
|
||||
remainder := rand.Intn(total)
|
||||
for k, v := range m {
|
||||
remainder -= v
|
||||
if remainder < 0 {
|
||||
return k
|
||||
}
|
||||
}
|
||||
panic("weightedChoice is broken.")
|
||||
}
|
||||
}
|
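For completeness, weightedChoice is what gives the scenario its 4:1 payload/capture-point split. The standalone snippet below is illustrative only; it repeats the same draw the helper's closure performs and shows that roughly 80% of sampled modes come out as "pl" with the weights used by Scenario().

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Same weights as teamshooter.Scenario(): payload is 4x as popular as
	// capture point.
	weights := map[string]int{"pl": 100, "cp": 25}

	total := 0
	for _, w := range weights {
		total += w
	}

	counts := map[string]int{}
	for i := 0; i < 10000; i++ {
		// This is the draw weightedChoice's returned closure performs.
		remainder := rand.Intn(total)
		for k, w := range weights {
			remainder -= w
			if remainder < 0 {
				counts[k]++
				break
			}
		}
	}
	fmt.Println(counts) // roughly map[cp:2000 pl:8000]
}
```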
@ -1,137 +0,0 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package scenarios
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/matchfunction"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
queryServiceAddress = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
|
||||
|
||||
logger = logrus.WithFields(logrus.Fields{
|
||||
"app": "scale",
|
||||
})
|
||||
)
|
||||
|
||||
// StatProcessor uses syncMaps to store the stress test metrics and occurrence of errors.
|
||||
// It can write out the data to an input io.Writer.
|
||||
type StatProcessor struct {
|
||||
em *sync.Map
|
||||
sm *sync.Map
|
||||
}
|
||||
|
||||
// NewStatProcessor returns an initialized StatProcessor
|
||||
func NewStatProcessor() *StatProcessor {
|
||||
return &StatProcessor{
|
||||
em: &sync.Map{},
|
||||
sm: &sync.Map{},
|
||||
}
|
||||
}
|
||||
|
||||
// SetStat sets the value for a key
|
||||
func (e StatProcessor) SetStat(k string, v interface{}) {
|
||||
e.sm.Store(k, v)
|
||||
}
|
||||
|
||||
// IncrementStat atomically increments the value of a key by delta
|
||||
func (e StatProcessor) IncrementStat(k string, delta interface{}) {
|
||||
statRead, ok := e.sm.Load(k)
|
||||
if !ok {
|
||||
statRead = 0
|
||||
}
|
||||
|
||||
switch delta.(type) {
|
||||
case int:
|
||||
e.sm.Store(k, statRead.(int)+delta.(int))
|
||||
case float32:
|
||||
e.sm.Store(k, statRead.(float32)+delta.(float32))
|
||||
case float64:
|
||||
e.sm.Store(k, statRead.(float64)+delta.(float64))
|
||||
default:
|
||||
logger.Errorf("IncrementStat: type %T not supported", delta)
|
||||
}
|
||||
}
|
||||
|
||||
// RecordError atomically records the occurrence of input errors
|
||||
func (e StatProcessor) RecordError(desc string, err error) {
|
||||
errMsg := fmt.Sprintf("%s: %s", desc, err.Error())
|
||||
errRead, ok := e.em.Load(errMsg)
|
||||
if !ok {
|
||||
errRead = 0
|
||||
}
|
||||
e.em.Store(errMsg, errRead.(int)+1)
|
||||
}
|
||||
|
||||
// Log writes the formatted errors and metrics to the input writer
|
||||
func (e StatProcessor) Log(w io.Writer) {
|
||||
e.sm.Range(func(k interface{}, v interface{}) bool {
|
||||
w.Write([]byte(fmt.Sprintf("%s: %d \n", k, v)))
|
||||
return true
|
||||
})
|
||||
e.em.Range(func(k interface{}, v interface{}) bool {
|
||||
w.Write([]byte(fmt.Sprintf("%s: %d \n", k, v)))
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func getQueryServiceGRPCClient() pb.QueryServiceClient {
|
||||
conn, err := grpc.Dial(queryServiceAddress, testing.NewGRPCDialOptions(logger)...)
|
||||
if err != nil {
|
||||
logger.Fatalf("Failed to connect to Open Match, got %v", err)
|
||||
}
|
||||
return pb.NewQueryServiceClient(conn)
|
||||
}
|
||||
|
||||
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
|
||||
var q pb.QueryServiceClient
|
||||
var startQ sync.Once
|
||||
|
||||
return func(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
|
||||
startQ.Do(func() {
|
||||
q = getQueryServiceGRPCClient()
|
||||
})
|
||||
|
||||
poolTickets, err := matchfunction.QueryPools(stream.Context(), q, req.GetProfile().GetPools())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
proposals, err := mmf(req.GetProfile(), poolTickets)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.WithFields(logrus.Fields{
|
||||
"proposals": proposals,
|
||||
}).Trace("proposals returned by match function")
|
||||
|
||||
for _, proposal := range proposals {
|
||||
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
9
go.mod
@ -15,7 +15,7 @@ module open-match.dev/open-match
|
||||
// limitations under the License.
|
||||
|
||||
// When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
|
||||
go 1.13
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.47.0 // indirect
|
||||
@ -23,8 +23,9 @@ require (
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.6.0
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.1.0
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.8
|
||||
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71
|
||||
github.com/TV4/logrus-stackdriver-formatter v0.1.0
|
||||
github.com/alicebob/miniredis/v2 v2.10.1
|
||||
github.com/alicebob/miniredis/v2 v2.11.0
|
||||
github.com/apache/thrift v0.13.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.25.27 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
@ -32,7 +33,7 @@ require (
|
||||
github.com/gogo/protobuf v1.3.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3
|
||||
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible
|
||||
github.com/googleapis/gnostic v0.3.1 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
|
||||
@ -51,10 +52,10 @@ require (
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.5.0
|
||||
github.com/stretchr/testify v1.4.0
|
||||
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 // indirect
|
||||
go.opencensus.io v0.22.1
|
||||
golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 // indirect
|
||||
golang.org/x/net v0.0.0-20191105084925-a882066a44e0
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
|
||||
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
|
||||
google.golang.org/api v0.13.0 // indirect
|
||||
|
21
go.sum
@ -20,9 +20,13 @@ contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZ
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.8 h1:iXI5hr7pUwMx0IwMphpKz5Q3If/G5JiWFVZ5MPPxP9E=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.8/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71 h1:J52um+Sp3v8TpSY0wOgpjr84np+xvrY3503DRirJ6wI=
|
||||
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71/go.mod h1:E4OavwrrOME3uj3Zm9Rla8ZDqlAR5GqKA+mMIPoilYk=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/FZambia/sentinel v1.0.0 h1:KJ0ryjKTZk5WMp0dXvSdNqp3lFaW1fNFuEYfrkLOYIc=
|
||||
github.com/FZambia/sentinel v1.0.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
@ -34,8 +38,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=
|
||||
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
|
||||
github.com/alicebob/miniredis/v2 v2.10.1 h1:r+hpRUqYCcIsrjxH/wRLwQGmA2nkQf4IYj7MKPwbA+s=
|
||||
github.com/alicebob/miniredis/v2 v2.10.1/go.mod h1:gUxwu+6dLLmJHIXOOBlgcXqbcpPPp+NzOnBzgqFIGYA=
|
||||
github.com/alicebob/miniredis/v2 v2.11.0 h1:Dz6uJ4w3Llb1ZiFoqyzF9aLuzbsEWCeKwstu9MzmSAk=
|
||||
github.com/alicebob/miniredis/v2 v2.11.0/go.mod h1:UA48pmi7aSazcGAvcdKcBB49z521IC9VjTTRz2nIaJE=
|
||||
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
|
||||
@ -75,6 +79,7 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
@ -107,8 +112,10 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3 h1:6amM4HsNPOvMLVc2ZnyqrjeQ92YAVWn7T4WBKK87inY=
|
||||
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible h1:1mCVU17Wc8oyVUlx1ZXpnWz1DNP6v0R5z5ElKCTvVrY=
|
||||
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
@ -122,6 +129,8 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
@ -170,6 +179,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
|
||||
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
@ -263,8 +274,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/gopher-lua v0.0.0-20190206043414-8bfc7677f583/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=
|
||||
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c h1:RCby8AaF+weuP1M+nwMQ4uQYO2shgD6UFAKvnXszwTw=
|
||||
github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
|
@ -20,28 +20,6 @@ metadata:
|
||||
app: open-match-demo
|
||||
release: open-match-demo
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: customize-configmap
|
||||
namespace: open-match-demo
|
||||
labels:
|
||||
app: open-match-customize
|
||||
component: config
|
||||
release: open-match-demo
|
||||
data:
|
||||
matchmaker_config_default.yaml: |-
|
||||
api:
|
||||
functions:
|
||||
hostname: "om-function"
|
||||
grpcport: 50502
|
||||
httpport: 51502
|
||||
matchmaker_config_override.yaml: |-
|
||||
api:
|
||||
query:
|
||||
hostname: "om-query.open-match.svc.cluster.local"
|
||||
grpcport: "50503"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
@ -108,20 +86,8 @@ spec:
|
||||
component: matchfunction
|
||||
release: open-match-demo
|
||||
spec:
|
||||
volumes:
|
||||
- name: customize-config-volume
|
||||
configMap:
|
||||
name: customize-configmap
|
||||
- name: om-config-volume-default
|
||||
configMap:
|
||||
name: customize-configmap
|
||||
containers:
|
||||
- name: om-function
|
||||
volumeMounts:
|
||||
- name: customize-config-volume
|
||||
mountPath: /app/config/override
|
||||
- name: om-config-volume-default
|
||||
mountPath: /app/config/default
|
||||
image: "gcr.io/open-match-public-images/openmatch-mmf-go-soloduel:0.0.0-dev"
|
||||
ports:
|
||||
- name: grpc
|
||||
|
@ -13,14 +13,14 @@
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v2
|
||||
appVersion: "0.0.0-dev"
|
||||
version: 0.0.0-dev
|
||||
appVersion: "1.0.0-rc.1"
|
||||
version: 1.0.0-rc.1
|
||||
name: open-match
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: 9.5.0
|
||||
repository: https://kubernetes-charts.storage.googleapis.com/
|
||||
condition: open-match-core.redis.install
|
||||
condition: open-match-core.redis.enabled
|
||||
- name: open-match-telemetry
|
||||
version: 0.0.0-dev
|
||||
condition: open-match-telemetry.enabled
|
||||
|
@ -1,41 +0,0 @@
|
||||
# Copyright 2019 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: customize-configmap
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
|
||||
labels:
|
||||
app: {{ template "openmatch.name" . }}
|
||||
component: config
|
||||
release: {{ .Release.Name }}
|
||||
data:
|
||||
matchmaker_config_default.yaml: |-
|
||||
api:
|
||||
functions:
|
||||
hostname: "{{ .Values.function.hostName }}"
|
||||
grpcport: "{{ .Values.function.grpcPort }}"
|
||||
httpport: "{{ .Values.function.httpPort }}"
|
||||
|
||||
evaluator:
|
||||
hostname: "{{ .Values.evaluator.hostName }}"
|
||||
grpcport: "{{ .Values.evaluator.grpcPort }}"
|
||||
httpport: "{{ .Values.evaluator.httpPort }}"
|
||||
matchmaker_config_override.yaml: |-
|
||||
api:
|
||||
query:
|
||||
hostname: "{{ .Values.query.hostName }}.{{ .Release.Namespace }}.svc.cluster.local"
|
||||
grpcport: "{{ .Values.query.grpcPort }}"
|
@ -26,7 +26,7 @@ evaluator:
|
||||
enabled: false
|
||||
replicas: 3
|
||||
portType: ClusterIP
|
||||
image: openmatch-evaluator-go-simple
|
||||
image: openmatch-default-evaluator
|
||||
|
||||
evaluatorConfigs:
|
||||
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,
|
||||
@ -35,11 +35,11 @@ evaluatorConfigs:
|
||||
default:
|
||||
volumeName: om-config-volume-default
|
||||
mountPath: /app/config/default
|
||||
configName: customize-configmap
|
||||
configName: om-configmap-default
|
||||
customize:
|
||||
volumeName: customize-config-volume
|
||||
volumeName: om-config-volume-override
|
||||
mountPath: /app/config/override
|
||||
configName: customize-configmap
|
||||
configName: om-configmap-override
|
||||
|
||||
mmfConfigs:
|
||||
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,
|
||||
@ -48,8 +48,8 @@ mmfConfigs:
|
||||
default:
|
||||
volumeName: om-config-volume-default
|
||||
mountPath: /app/config/default
|
||||
configName: customize-configmap
|
||||
configName: om-configmap-default
|
||||
customize:
|
||||
volumeName: customize-config-volume
|
||||
volumeName: om-config-volume-override
|
||||
mountPath: /app/config/override
|
||||
configName: customize-configmap
|
||||
configName: om-configmap-override
|
||||
|
@ -320,6 +320,100 @@
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"cacheTimeout": null,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 19
|
||||
},
|
||||
"id": 24,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(scale_frontend_runners_waiting)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Runners Waiting To Start",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum(scale_frontend_runners_creating)",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Runners Creating Ticket",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Outstanding Frontend Runners",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": "0",
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
|
@ -1,108 +0,0 @@
|
||||
# Copyright 2019 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: scale-configmap
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
|
||||
labels:
|
||||
app: {{ template "openmatch.name" . }}
|
||||
component: config
|
||||
release: {{ .Release.Name }}
|
||||
data:
|
||||
matchmaker_config_default.yaml: |-
|
||||
api:
|
||||
backend:
|
||||
hostname: "{{ .Values.backend.hostName }}"
|
||||
grpcport: "{{ .Values.backend.grpcPort }}"
|
||||
httpport: "{{ .Values.backend.httpPort }}"
|
||||
frontend:
|
||||
hostname: "{{ .Values.frontend.hostName }}"
|
||||
grpcport: "{{ .Values.frontend.grpcPort }}"
|
||||
httpport: "{{ .Values.frontend.httpPort }}"
|
||||
scale:
|
||||
httpport: "51509"
|
||||
{{- if .Values.global.tls.enabled }}
|
||||
tls:
|
||||
trustedCertificatePath: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
|
||||
certificatefile: "{{.Values.global.tls.server.mountPath}}/public.cert"
|
||||
privatekey: "{{.Values.global.tls.server.mountPath}}/private.key"
|
||||
rootcertificatefile: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
|
||||
{{- end }}
|
||||
|
||||
logging:
|
||||
level: debug
|
||||
{{- if .Values.global.telemetry.stackdriverMetrics.enabled }}
|
||||
format: stackdriver
|
||||
{{- else }}
|
||||
format: text
|
||||
{{- end }}
|
||||
rpc: {{ .Values.global.logging.rpc.enabled }}
|
||||
# Open Match applies the exponential backoff strategy for its retryable gRPC calls.
|
||||
# The settings below are the default backoff configuration used in Open Match.
|
||||
# See https://github.com/cenkalti/backoff/blob/v3/exponential.go for detailed explanations
|
||||
backoff:
|
||||
# The initial retry interval (in milliseconds)
|
||||
initialInterval: 100ms
|
||||
# maxInterval caps the maximum time elapsed for a retry interval
|
||||
maxInterval: 500ms
|
||||
# The next retry interval is multiplied by this multiplier
|
||||
multiplier: 1.5
|
||||
# Randomize the retry interval
|
||||
randFactor: 0.5
|
||||
# maxElapsedTime caps the retry time (in milliseconds)
|
||||
maxElapsedTime: 3000ms
|
||||
|
||||
telemetry:
|
||||
zpages:
|
||||
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
|
||||
jaeger:
|
||||
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
|
||||
samplerFraction: {{ .Values.global.telemetry.jaeger.samplerFraction }}
|
||||
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
|
||||
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
|
||||
prometheus:
|
||||
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
|
||||
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"
|
||||
serviceDiscovery: "{{ .Values.global.telemetry.prometheus.serviceDiscovery }}"
|
||||
stackdriverMetrics:
|
||||
enable: "{{ .Values.global.telemetry.stackdriverMetrics.enabled }}"
|
||||
gcpProjectId: "{{ .Values.global.gcpProjectId }}"
|
||||
prefix: "{{ .Values.global.telemetry.stackdriverMetrics.prefix }}"
|
||||
reportingPeriod: "{{ .Values.global.telemetry.reportingPeriod }}"
|
||||
|
||||
matchmaker_config_override.yaml: |-
|
||||
testConfig:
|
||||
profile: "{{ .Values.testConfig.profile }}"
|
||||
regions:
|
||||
{{- range .Values.testConfig.regions }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
characters:
|
||||
{{- range .Values.testConfig.characters }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
minRating: "{{ .Values.testConfig.minRating }}"
|
||||
maxRating: "{{ .Values.testConfig.maxRating }}"
|
||||
ticketsPerMatch: "{{ .Values.testConfig.ticketsPerMatch }}"
|
||||
multifilter:
|
||||
rangeSize: "{{ .Values.testConfig.multifilter.rangeSize }}"
|
||||
rangeOverlap: "{{ .Values.testConfig.multifilter.rangeOverlap }}"
|
||||
multipool:
|
||||
rangeSize: "{{ .Values.testConfig.multipool.rangeSize }}"
|
||||
rangeOverlap: "{{ .Values.testConfig.multipool.rangeOverlap }}"
|
||||
characterCount: "{{ .Values.testConfig.multipool.characterCount }}"
|
||||
|
@ -28,29 +28,8 @@ configs:
|
||||
default:
|
||||
volumeName: om-config-volume-default
|
||||
mountPath: /app/config/default
|
||||
configName: scale-configmap
|
||||
scale-configmap:
|
||||
volumeName: scale-config-volume
|
||||
configName: om-configmap-default
|
||||
override:
|
||||
volumeName: om-config-volume-override
|
||||
mountPath: /app/config/override
|
||||
configName: scale-configmap
|
||||
|
||||
testConfig:
|
||||
profile: scaleprofiles
|
||||
regions:
|
||||
- region.europe-west1
|
||||
- region.europe-west2
|
||||
- region.europe-west3
|
||||
- region.europe-west4
|
||||
characters:
|
||||
- cleric
|
||||
- knight
|
||||
minRating: 0
|
||||
maxRating: 100
|
||||
ticketsPerMatch: 8
|
||||
multifilter:
|
||||
rangeSize: 10
|
||||
rangeOverlap: 5
|
||||
multipool:
|
||||
rangeSize: 10
|
||||
rangeOverlap: 5
|
||||
characterCount: 4
|
||||
configName: om-configmap-override
|
@ -15,8 +15,8 @@
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": 3,
|
||||
"iteration": 1580245993833,
|
||||
"id": 2,
|
||||
"iteration": 1580944984710,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
@ -415,7 +415,7 @@
|
||||
},
|
||||
"id": 57,
|
||||
"panels": [],
|
||||
"title": "openmatch.Mmlogic/QueryTickets",
|
||||
"title": "openmatch.QueryService/QueryTickets",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
@ -812,7 +812,7 @@
|
||||
},
|
||||
"id": 29,
|
||||
"panels": [],
|
||||
"title": "openmatch.Backend/AssignTickets",
|
||||
"title": "openmatch.BackendService/AssignTickets",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
@ -1210,7 +1210,7 @@
|
||||
"id": 31,
|
||||
"panels": [],
|
||||
"repeat": null,
|
||||
"title": "openmatch.Frontend/CreateTicket",
|
||||
"title": "openmatch.FrontendService/CreateTicket",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
@ -2399,7 +2399,7 @@
|
||||
},
|
||||
"id": 42,
|
||||
"panels": [],
|
||||
"title": "openmatch.Frontend/FetchMatches",
|
||||
"title": "openmatch.BackendService/FetchMatches",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
@ -3191,7 +3191,7 @@
|
||||
},
|
||||
"id": 23,
|
||||
"panels": [],
|
||||
"title": "openmatch.Frontend/DeleteTicket",
|
||||
"title": "openmatch.FrontendService/DeleteTicket",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
|
2131
install/helm/open-match/subcharts/open-match-telemetry/dashboards/match-making.json
Normal file
File diff suppressed because it is too large
@ -16,8 +16,8 @@
|
||||
"editable": true,
|
||||
"gnetId": 763,
|
||||
"graphTooltip": 0,
|
||||
"id": 2,
|
||||
"iteration": 1579655194536,
|
||||
"id": 6,
|
||||
"iteration": 1580946687856,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
@ -296,7 +296,7 @@
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"fill": 1,
|
||||
"fill": 0,
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 8,
|
||||
@ -312,6 +312,8 @@
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"sort": "current",
|
||||
"sortDesc": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
@ -325,24 +327,54 @@
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"seriesOverrides": [
|
||||
{
|
||||
"alias": "limit",
|
||||
"color": "#C4162A",
|
||||
"hideTooltip": true,
|
||||
"legend": false,
|
||||
"nullPointMode": "connected"
|
||||
},
|
||||
{
|
||||
"alias": "request",
|
||||
"color": "#73BF69",
|
||||
"hideTooltip": true,
|
||||
"legend": false,
|
||||
"nullPointMode": "connected"
|
||||
}
|
||||
],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(container_cpu_usage_seconds_total{pod_name=~\"om-redis.*\", name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m])) by (pod_name, container_name) /\nsum(container_spec_cpu_quota{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}/container_spec_cpu_period{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}) by (pod_name, container_name) * 100",
|
||||
"expr": "sum(rate(container_cpu_usage_seconds_total{pod_name=~\"om-redis.*\", name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m])) by (pod_name)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{pod_name}}",
|
||||
"legendFormat": "{{pod_name}} usage",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum(kube_pod_container_resource_limits_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "limit",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum(kube_pod_container_resource_requests_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "request",
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "CPU Usage Percentage of Limit",
|
||||
"title": "CPU Usage",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@ -360,7 +392,7 @@
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": "%",
|
||||
"label": "core",
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
@ -628,6 +660,8 @@
|
||||
"min": false,
|
||||
"rightSide": true,
|
||||
"show": true,
|
||||
"sort": "current",
|
||||
"sortDesc": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
@ -655,6 +689,13 @@
|
||||
"refId": "A",
|
||||
"step": 240,
|
||||
"target": ""
|
||||
},
|
||||
{
|
||||
"expr": "sum by (kubernetes_pod_name) (rate(redis_commands_total[5m]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "total - {{kubernetes_pod_name}}",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
@ -911,8 +952,8 @@
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {
|
||||
"text": "10.28.0.27:9121",
|
||||
"value": "10.28.0.27:9121"
|
||||
"text": "10.28.0.12:9121",
|
||||
"value": "10.28.0.12:9121"
|
||||
},
|
||||
"datasource": "Prometheus",
|
||||
"definition": "label_values(redis_up, instance)",
|
||||
|
@ -1,290 +0,0 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": 3,
|
||||
"iteration": 1562886170229,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(frontend_tickets_created[$timewindow]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Created",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum(rate(frontend_tickets_deleted[$timewindow]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Deleted",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Ticket Flow",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"description": "",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 4,
|
||||
"interval": "",
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"options": {},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum(rate(frontend_tickets_assignments_retrieved[$timewindow]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Assignments Retrieved",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Assignments",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"decimals": null,
|
||||
"format": "reqps",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": "0",
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 18,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {
|
||||
"text": "5m",
|
||||
"value": "5m"
|
||||
},
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "Time Window",
|
||||
"multi": false,
|
||||
"name": "timewindow",
|
||||
"options": [
|
||||
{
|
||||
"selected": true,
|
||||
"text": "5m",
|
||||
"value": "5m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "10m",
|
||||
"value": "10m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "15m",
|
||||
"value": "15m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "30m",
|
||||
"value": "30m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "1h",
|
||||
"value": "1h"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "4h",
|
||||
"value": "4h"
|
||||
}
|
||||
],
|
||||
"query": "5m,10m,15m,30m,1h,4h",
|
||||
"skipUrlSync": false,
|
||||
"type": "custom"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"time_options": [
|
||||
"5m",
|
||||
"15m",
|
||||
"1h",
|
||||
"6h",
|
||||
"12h",
|
||||
"24h",
|
||||
"2d",
|
||||
"7d",
|
||||
"30d"
|
||||
]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "Tickets",
|
||||
"uid": "TlgyFfIWz",
|
||||
"version": 6
|
||||
}
|
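The Ticket Flow and Assignments panels above simply graph `rate()` over the cumulative counters `frontend_tickets_created`, `frontend_tickets_deleted`, and `frontend_tickets_assignments_retrieved`, parameterized by the `$timewindow` template variable defined in the `templating` block. As a rough sketch of how such a counter reaches Prometheus (the real wiring lives in the frontend service and the telemetry package; the names below are illustrative, not the exact Open Match code):

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Illustrative stand-in for the real frontend metric; the exported series ends
// up as <prefix>_frontend_tickets_created, which the dashboard rates over $timewindow.
var ticketsCreated = stats.Int64("frontend/tickets_created", "Number of tickets created", stats.UnitDimensionless)

var ticketsCreatedView = &view.View{
	Measure:     ticketsCreated,
	Name:        "frontend/tickets_created",
	Description: "Number of tickets created",
	Aggregation: view.Count(), // cumulative counter, suitable for rate() queries
}

func main() {
	if err := view.Register(ticketsCreatedView); err != nil {
		log.Fatal(err)
	}
	// One increment per created ticket; rate() turns this into tickets/second.
	stats.Record(context.Background(), ticketsCreated.M(1))
}
```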
@ -68,10 +68,15 @@ data:
|
||||
swaggerui:
|
||||
hostname: "{{ .Values.swaggerui.hostName }}"
|
||||
httpport: "{{ .Values.swaggerui.httpPort }}"
|
||||
scale-frontend:
|
||||
|
||||
# Configurations for api.test and api.scale are used for testing.
|
||||
test:
|
||||
hostname: "test"
|
||||
grpcport: "50509"
|
||||
httpport: "51509"
|
||||
scale-backend:
|
||||
httpport: "51510"
|
||||
scale:
|
||||
httpport: "51509"
|
||||
|
||||
{{- if .Values.global.tls.enabled }}
|
||||
tls:
|
||||
trustedCertificatePath: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
|
||||
@ -80,37 +85,40 @@ data:
|
||||
rootcertificatefile: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
|
||||
{{- end }}
|
||||
|
||||
storage:
|
||||
ignoreListTTL: {{ index .Values "open-match-core" "ignoreListTTL" }}
|
||||
page:
|
||||
size: 10000
|
||||
|
||||
redis:
|
||||
{{- if index .Values "open-match-core" "redis" "install" }}
|
||||
{{- if index .Values "open-match-core" "redis" "enabled" }}
|
||||
{{- if index .Values "redis" "sentinel" "enabled"}}
|
||||
sentinelPort: {{ .Values.redis.sentinel.port }}
|
||||
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
|
||||
sentinelHostname: {{ .Values.redis.fullnameOverride }}
|
||||
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
|
||||
{{- else}}
|
||||
# Open Match's default Redis setups
|
||||
hostname: {{ .Values.redis.fullnameOverride }}-master.{{ .Release.Namespace }}.svc.cluster.local
|
||||
port: {{ .Values.redis.redisPort }}
|
||||
user: {{ .Values.redis.user }}
|
||||
{{- end}}
|
||||
{{- else }}
|
||||
# BYO Redis setups
|
||||
hostname: {{ index .Values "open-match-core" "redis" "hostname" }}
|
||||
port: {{ index .Values "open-match-core" "redis" "port" }}
|
||||
user: {{ index .Values "open-match-core" "redis" "user" }}
|
||||
{{- end }}
|
||||
{{- if .Values.redis.usePassword }}
|
||||
usePassword: {{ .Values.redis.usePassword }}
|
||||
passwordPath: {{ .Values.redis.secretMountPath }}/redis-password
|
||||
{{- end }}
|
||||
pool:
|
||||
maxIdle: {{ index .Values "open-match-core" "redis" "pool" "maxIdle" }}
|
||||
maxActive: {{ index .Values "open-match-core" "redis" "pool" "maxActive" }}
|
||||
idleTimeout: {{ index .Values "open-match-core" "redis" "pool" "idleTimeout" }}
|
||||
healthCheckTimeout: {{ index .Values "open-match-core" "redis" "pool" "healthCheckTimeout" }}
|
||||
expiration: 43200
|
||||
|
||||
telemetry:
|
||||
reportingPeriod: "{{ .Values.global.telemetry.reportingPeriod }}"
|
||||
traceSamplingFraction: "{{ .Values.global.telemetry.traceSamplingFraction }}"
|
||||
zpages:
|
||||
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
|
||||
jaeger:
|
||||
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
|
||||
samplerFraction: {{ .Values.global.telemetry.jaeger.samplerFraction }}
|
||||
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
|
||||
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
|
||||
prometheus:
|
||||
|
@ -25,12 +25,24 @@ metadata:
|
||||
release: {{ .Release.Name }}
|
||||
data:
|
||||
matchmaker_config_override.yaml: |-
|
||||
# Length of time between first fetch matches call, and when no further fetch
|
||||
# matches calls will join the current evaluation/synchronization cycle,
|
||||
# instead waiting for the next cycle.
|
||||
registrationInterval: {{ index .Values "open-match-core" "registrationInterval" }}
|
||||
# Length of time after the match function has started before it will be canceled,
|
||||
# and evaluator call input is EOF.
|
||||
proposalCollectionInterval: {{ index .Values "open-match-core" "proposalCollectionInterval" }}
|
||||
# Time after a ticket has been returned from fetch matches (marked as pending)
|
||||
# before it automatically becomes active again and will be returned by query
|
||||
# calls.
|
||||
pendingReleaseTimeout: {{ index .Values "open-match-core" "pendingReleaseTimeout" }}
|
||||
# Time after a ticket has been assigned before it is automatically deleted.
|
||||
assignedDeleteTimeout: {{ index .Values "open-match-core" "assignedDeleteTimeout" }}
|
||||
# Maximum number of tickets to return on a single QueryTicketsResponse.
|
||||
queryPageSize: {{ index .Values "open-match-core" "queryPageSize" }}
|
||||
api:
|
||||
evaluator:
|
||||
hostname: "{{ .Values.evaluator.hostName }}"
|
||||
grpcport: "{{ .Values.evaluator.grpcPort }}"
|
||||
httpport: "{{ .Values.evaluator.httpPort }}"
|
||||
synchronizer:
|
||||
registrationIntervalMs: 250ms
|
||||
proposalCollectionIntervalMs: 20000ms
|
||||
{{- end }}
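The override configmap above only carries the tuning knobs that differ per install; each service reads them back as typed values at startup. A minimal sketch of consuming these duration keys with viper (the real loader is Open Match's internal config package; the mount path mirrors the volume mounts used by the test pod later in this chart, so treat the exact paths as assumptions):

```go
package main

import (
	"log"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// Mirrors the chart's volume mounts: /app/config/default and /app/config/override.
	v.SetConfigName("matchmaker_config_override")
	v.SetConfigType("yaml")
	v.AddConfigPath("/app/config/override")
	if err := v.ReadInConfig(); err != nil {
		log.Fatalf("cannot read override config: %v", err)
	}

	// Duration- and integer-typed keys rendered by the template above.
	registration := v.GetDuration("registrationInterval")    // e.g. 250ms
	proposal := v.GetDuration("proposalCollectionInterval")  // e.g. 20s
	pendingRelease := v.GetDuration("pendingReleaseTimeout") // e.g. 1m
	assignedDelete := v.GetDuration("assignedDeleteTimeout") // e.g. 10m
	pageSize := v.GetInt("queryPageSize")                    // e.g. 10000

	log.Println(registration, proposal, pendingRelease, assignedDelete, pageSize)
}
```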
|
||||
|
@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
{{- if index .Values "open-match-core" "enabled" }}
|
||||
{{- if index .Values "open-match-core" "swaggerui" "enabled" }}
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
|
@ -1,3 +1,19 @@
|
||||
# Copyright 2019 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
{{- if .Values.ci }}
|
||||
|
||||
# This applies om-test-role to the open-match-test-service account under the release namespace.
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
@ -16,3 +32,5 @@ roleRef:
|
||||
kind: Role
|
||||
name: om-test-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
{{- end }}
|
||||
|
@ -1,3 +1,19 @@
|
||||
# Copyright 2019 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
{{- if .Values.ci }}
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
@ -27,3 +43,5 @@ rules:
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
|
||||
{{- end }}
|
||||
|
@ -1,3 +1,19 @@
|
||||
# Copyright 2019 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
{{- if .Values.ci }}
|
||||
|
||||
# Create a service account for open-match-test services.
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
@ -9,3 +25,5 @@ metadata:
|
||||
app: {{ template "openmatch.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
automountServiceAccountToken: true
|
||||
|
||||
{{- end }}
|
||||
|
@ -1,24 +1,83 @@
|
||||
# Copyright 2019 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
{{- if .Values.ci }}
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
|
||||
labels:
|
||||
app: {{ template "openmatch.name" . }}
|
||||
component: test
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
selector:
|
||||
app: {{ template "openmatch.name" . }}
|
||||
component: test
|
||||
release: {{ .Release.Name }}
|
||||
ports:
|
||||
- name: grpc
|
||||
protocol: TCP
|
||||
port: 50509
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 51509
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: om-test
|
||||
name: test
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
{{- include "openmatch.chartmeta" . | nindent 4 }}
|
||||
"helm.sh/hook": test-success
|
||||
labels:
|
||||
app: {{ template "openmatch.name" . }}
|
||||
component: om-test
|
||||
component: test
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
# Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.
|
||||
activeDeadlineSeconds: 900
|
||||
serviceAccountName: open-match-test-service
|
||||
automountServiceAccountToken: true
|
||||
volumes:
|
||||
- configMap:
|
||||
defaultMode: 420
|
||||
name: om-configmap-default
|
||||
name: om-config-volume-default
|
||||
- configMap:
|
||||
defaultMode: 420
|
||||
name: om-configmap-override
|
||||
name: om-config-volume-override
|
||||
containers:
|
||||
- image: "{{ .Values.global.image.registry }}/openmatch-base-build:{{ .Values.global.image.tag }}"
|
||||
- name: "test"
|
||||
volumeMounts:
|
||||
- mountPath: /app/config/default
|
||||
name: om-config-volume-default
|
||||
- mountPath: /app/config/override
|
||||
name: om-config-volume-override
|
||||
image: "{{ .Values.global.image.registry }}/openmatch-base-build:{{ .Values.global.image.tag }}"
|
||||
ports:
|
||||
- name: grpc
|
||||
containerPort: 50509
|
||||
- name: http
|
||||
containerPort: 51509
|
||||
imagePullPolicy: Always
|
||||
name: om-test
|
||||
name: test
|
||||
resources:
|
||||
limits:
|
||||
memory: 800Mi
|
||||
@ -32,7 +91,7 @@ spec:
|
||||
command: ["go"]
|
||||
args:
|
||||
- "test"
|
||||
- "./test/e2e"
|
||||
- "./internal/testing/e2e"
|
||||
- "-v"
|
||||
- "-timeout"
|
||||
- "150s"
|
||||
@ -40,3 +99,5 @@ spec:
|
||||
- "-tags"
|
||||
- "e2ecluster"
|
||||
restartPolicy: Never
|
||||
|
||||
{{- end }}
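The test container runs `go test ./internal/testing/e2e -tags e2ecluster`, so the in-cluster tests are compiled in only when that build tag is supplied. A minimal sketch of what such a tag-gated test file looks like (the real tests are considerably larger; the test name here is made up):

```go
// +build e2ecluster

package e2e

import "testing"

// TestInCluster compiles and runs only when the e2ecluster build tag is set,
// e.g. `go test ./internal/testing/e2e -v -tags e2ecluster`.
func TestInCluster(t *testing.T) {
	t.Log("a cluster-backed end-to-end test would exercise the deployed services here")
}
```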
|
||||
|
@ -112,6 +112,10 @@ redis:
|
||||
configmap: |
|
||||
maxclients 100000
|
||||
maxmemory 500000000
|
||||
sentinel:
|
||||
enabled: true
|
||||
masterSet: om-redis-master
|
||||
port: 26379
|
||||
master:
|
||||
disableCommands: [] # don't disable 'FLUSH-' commands
|
||||
resources:
|
||||
@ -168,10 +172,26 @@ redis:
|
||||
# Controls if users need to install backend, frontend, query, om-configmap, and swaggerui.
|
||||
open-match-core:
|
||||
enabled: true
|
||||
ignoreListTTL: 60000ms
|
||||
|
||||
# Length of time between first fetch matches call, and when no further fetch
|
||||
# matches calls will join the current evaluation/synchronization cycle,
|
||||
# instead waiting for the next cycle.
|
||||
registrationInterval: 250ms
|
||||
# Length of time after the match function has started before it will be canceled,
|
||||
# and evaluator call input is EOF.
|
||||
proposalCollectionInterval: 20s
|
||||
# Time after a ticket has been returned from fetch matches (marked as pending)
|
||||
# before it automatically becomes active again and will be returned by query
|
||||
# calls.
|
||||
pendingReleaseTimeout: 1m
|
||||
# Time after a ticket has been assigned before it is automatically deleted.
|
||||
assignedDeleteTimeout: 10m
|
||||
# Maximum number of tickets to return on a single QueryTicketsResponse.
|
||||
queryPageSize: 10000
|
||||
|
||||
redis:
|
||||
install: true
|
||||
# If open-match-core.redis.install is set to false, have Open Match components talk to this redis address instead.
|
||||
enabled: true
|
||||
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
|
||||
# Otherwise the default is set to the om-redis instance.
|
||||
hostname: # Your redis server address
|
||||
port: 6379
|
||||
@ -181,6 +201,8 @@ open-match-core:
|
||||
maxActive: 500
|
||||
idleTimeout: 0
|
||||
healthCheckTimeout: 300ms
|
||||
swaggerui:
|
||||
enabled: false
|
||||
|
||||
# Controls if users need to install scale testing setup for Open Match.
|
||||
open-match-scale:
|
||||
@ -250,7 +272,7 @@ global:
|
||||
# Use this field if you need to override the image registry and image tag for all services defined in this chart
|
||||
image:
|
||||
registry: gcr.io/open-match-public-images
|
||||
tag: 0.0.0-dev
|
||||
tag: 1.0.0-rc.1
|
||||
pullPolicy: Always
|
||||
|
||||
|
||||
@ -258,11 +280,12 @@ global:
|
||||
# requires pod-level annotation to customize its scrape path.
|
||||
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
|
||||
telemetry:
|
||||
reportingPeriod: "1m"
|
||||
traceSamplingFraction: 0.005 # What fraction of traces to sample.
|
||||
zpages:
|
||||
enabled: true
|
||||
jaeger:
|
||||
enabled: false
|
||||
samplerFraction: 0.005 # Configures a sampler that samples a given fraction of traces.
|
||||
agentEndpoint: "open-match-jaeger-agent:6831"
|
||||
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
|
||||
prometheus:
|
||||
@ -274,4 +297,3 @@ global:
|
||||
prefix: "open_match"
|
||||
grafana:
|
||||
enabled: false
|
||||
reportingPeriod: "1m"
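global.telemetry ties together the reporting period, the Prometheus scrape configuration (with the open_match prefix), and optional Jaeger tracing. A hedged sketch of the OpenCensus side of that contract, not the exact Open Match telemetry package (the port and endpoint path are illustrative assumptions):

```go
package main

import (
	"log"
	"net/http"
	"time"

	"contrib.go.opencensus.io/exporter/prometheus"
	"go.opencensus.io/stats/view"
)

func main() {
	// Namespace matches global.telemetry.prometheus.prefix: "open_match".
	pe, err := prometheus.NewExporter(prometheus.Options{Namespace: "open_match"})
	if err != nil {
		log.Fatalf("failed to create Prometheus exporter: %v", err)
	}
	view.RegisterExporter(pe)
	// Matches telemetry.reportingPeriod: "1m".
	view.SetReportingPeriod(time.Minute)

	// The exporter doubles as the scrape handler.
	http.Handle("/metrics", pe)
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```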
|
||||
|
@ -112,6 +112,10 @@ redis:
|
||||
configmap: |
|
||||
maxclients 100000
|
||||
maxmemory 300000000
|
||||
sentinel:
|
||||
enabled: true
|
||||
masterSet: om-redis-master
|
||||
port: 26379
|
||||
master:
|
||||
disableCommands: [] # don't disable 'FLUSH-' commands
|
||||
resources:
|
||||
@ -153,10 +157,26 @@ redis:
|
||||
# Controls if users need to install backend, frontend, query, om-configmap, and swaggerui.
|
||||
open-match-core:
|
||||
enabled: true
|
||||
ignoreListTTL: 60000ms
|
||||
|
||||
# Length of time between first fetch matches call, and when no further fetch
|
||||
# matches calls will join the current evaluation/synchronization cycle,
|
||||
# instead waiting for the next cycle.
|
||||
registrationInterval: 250ms
|
||||
# Length of time after match function as started before it will be canceled,
|
||||
# and evaluator call input is EOF.
|
||||
proposalCollectionInterval: 20s
|
||||
# Time after a ticket has been returned from fetch matches (marked as pending)
|
||||
# before it automatically becomes active again and will be returned by query
|
||||
# calls.
|
||||
pendingReleaseTimeout: 1m
|
||||
# Time after a ticket has been assigned before it is automatically deleted.
|
||||
assignedDeleteTimeout: 10m
|
||||
# Maximum number of tickets to return on a single QueryTicketsResponse.
|
||||
queryPageSize: 10000
|
||||
|
||||
redis:
|
||||
install: true
|
||||
# If open-match-core.redis.install is set to false, have Open Match components talk to this redis address instead.
|
||||
enabled: true
|
||||
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
|
||||
# Otherwise the default is set to the om-redis instance.
|
||||
hostname: # Your redis server address
|
||||
port: 6379
|
||||
@ -166,6 +186,8 @@ open-match-core:
|
||||
maxActive: 0
|
||||
idleTimeout: 0
|
||||
healthCheckTimeout: 300ms
|
||||
swaggerui:
|
||||
enabled: true
|
||||
|
||||
# Controls if users need to install scale testing setup for Open Match.
|
||||
open-match-scale:
|
||||
@ -235,7 +257,7 @@ global:
|
||||
# Use this field if you need to override the image registry and image tag for all services defined in this chart
|
||||
image:
|
||||
registry: gcr.io/open-match-public-images
|
||||
tag: 0.0.0-dev
|
||||
tag: 1.0.0-rc.1
|
||||
pullPolicy: Always
|
||||
|
||||
|
||||
@ -243,11 +265,12 @@ global:
|
||||
# requires pod-level annotation to customize its scrape path.
|
||||
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
|
||||
telemetry:
|
||||
reportingPeriod: "1m"
|
||||
traceSamplingFraction: 0.01 # What fraction of traces to sample.
|
||||
zpages:
|
||||
enabled: true
|
||||
jaeger:
|
||||
enabled: false
|
||||
samplerFraction: 0.01 # Configures a sampler that samples a given fraction of traces.
|
||||
agentEndpoint: "open-match-jaeger-agent:6831"
|
||||
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
|
||||
prometheus:
|
||||
@ -259,4 +282,3 @@ global:
|
||||
prefix: "open_match"
|
||||
grafana:
|
||||
enabled: false
|
||||
reportingPeriod: "1m"
|
||||
|
@ -1,55 +0,0 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package app contains the common application initialization code for Open Match servers.
|
||||
package app
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/logging"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
)
|
||||
|
||||
var (
|
||||
logger = logrus.WithFields(logrus.Fields{
|
||||
"app": "openmatch",
|
||||
"component": "app.main",
|
||||
})
|
||||
)
|
||||
|
||||
// RunApplication creates a server.
|
||||
func RunApplication(serverName string, getCfg func() (config.View, error), bindService func(*rpc.ServerParams, config.View) error) {
|
||||
cfg, err := getCfg()
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
}).Fatalf("cannot read configuration.")
|
||||
}
|
||||
logging.ConfigureLogging(cfg)
|
||||
p, err := rpc.NewServerParamsFromConfig(cfg, "api."+serverName)
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
}).Fatalf("cannot construct server.")
|
||||
}
|
||||
|
||||
if err := bindService(p, cfg); err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
}).Fatalf("failed to bind %s service.", serverName)
|
||||
}
|
||||
|
||||
rpc.MustServeForever(p)
|
||||
}
|
@ -15,25 +15,72 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
totalBytesPerMatch = stats.Int64("open-match.dev/backend/total_bytes_per_match", "Total bytes per match", stats.UnitBytes)
|
||||
ticketsPerMatch = stats.Int64("open-match.dev/backend/tickets_per_match", "Number of tickets per match", stats.UnitDimensionless)
|
||||
ticketsReleased = stats.Int64("open-match.dev/backend/tickets_released", "Number of tickets released per request", stats.UnitDimensionless)
|
||||
ticketsAssigned = stats.Int64("open-match.dev/backend/tickets_assigned", "Number of tickets assigned per request", stats.UnitDimensionless)
|
||||
|
||||
totalMatchesView = &view.View{
|
||||
Measure: totalBytesPerMatch,
|
||||
Name: "open-match.dev/backend/total_matches",
|
||||
Description: "Total number of matches",
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
totalBytesPerMatchView = &view.View{
|
||||
Measure: totalBytesPerMatch,
|
||||
Name: "open-match.dev/backend/total_bytes_per_match",
|
||||
Description: "Total bytes per match",
|
||||
Aggregation: telemetry.DefaultBytesDistribution,
|
||||
}
|
||||
ticketsPerMatchView = &view.View{
|
||||
Measure: ticketsPerMatch,
|
||||
Name: "open-match.dev/backend/tickets_per_match",
|
||||
Description: "Tickets per ticket",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
ticketsAssignedView = &view.View{
|
||||
Measure: ticketsAssigned,
|
||||
Name: "open-match.dev/backend/tickets_assigned",
|
||||
Description: "Number of tickets assigned per request",
|
||||
Aggregation: view.Sum(),
|
||||
}
|
||||
ticketsReleasedView = &view.View{
|
||||
Measure: ticketsReleased,
|
||||
Name: "open-match.dev/backend/tickets_released",
|
||||
Description: "Number of tickets released per request",
|
||||
Aggregation: view.Sum(),
|
||||
}
|
||||
)
|
||||
|
||||
// BindService creates the backend service and binds it to the serving harness.
|
||||
func BindService(p *rpc.ServerParams, cfg config.View) error {
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
service := &backendService{
|
||||
synchronizer: newSynchronizerClient(cfg),
|
||||
store: statestore.New(cfg),
|
||||
cc: rpc.NewClientCache(cfg),
|
||||
synchronizer: newSynchronizerClient(p.Config()),
|
||||
store: statestore.New(p.Config()),
|
||||
cc: rpc.NewClientCache(p.Config()),
|
||||
}
|
||||
|
||||
p.AddHealthCheckFunc(service.store.HealthCheck)
|
||||
p.AddHandleFunc(func(s *grpc.Server) {
|
||||
b.AddHealthCheckFunc(service.store.HealthCheck)
|
||||
b.AddHandleFunc(func(s *grpc.Server) {
|
||||
pb.RegisterBackendServiceServer(s, service)
|
||||
}, pb.RegisterBackendServiceHandlerFromEndpoint)
|
||||
|
||||
b.RegisterViews(
|
||||
totalMatchesView,
|
||||
totalBytesPerMatchView,
|
||||
ticketsPerMatchView,
|
||||
ticketsAssignedView,
|
||||
ticketsReleasedView,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
@ -23,16 +23,20 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/appmain/contextcause"
|
||||
"open-match.dev/open-match/internal/ipb"
|
||||
"open-match.dev/open-match/internal/omerror"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
@ -49,10 +53,6 @@ var (
|
||||
"app": "openmatch",
|
||||
"component": "app.backend",
|
||||
})
|
||||
mMatchesFetched = telemetry.Counter("backend/matches_fetched", "matches fetched")
|
||||
mMatchesSentToEvaluation = telemetry.Counter("backend/matches_sent_to_evaluation", "matches sent to evaluation")
|
||||
mTicketsAssigned = telemetry.Counter("backend/tickets_assigned", "tickets assigned")
|
||||
mTicketsReleased = telemetry.Counter("backend/tickets_released", "tickets released")
|
||||
)
|
||||
|
||||
// FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile
|
||||
@ -60,46 +60,45 @@ var (
|
||||
// FetchMatches immediately returns an error if it encounters any execution failures.
|
||||
// - If the synchronizer is enabled, FetchMatch will then call the synchronizer to deduplicate proposals with overlapped tickets.
|
||||
func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.BackendService_FetchMatchesServer) error {
|
||||
if req.GetConfig() == nil {
|
||||
if req.Config == nil {
|
||||
return status.Error(codes.InvalidArgument, ".config is required")
|
||||
}
|
||||
if req.GetProfile() == nil {
|
||||
if req.Profile == nil {
|
||||
return status.Error(codes.InvalidArgument, ".profile is required")
|
||||
}
|
||||
|
||||
syncStream, err := s.synchronizer.synchronize(stream.Context())
|
||||
// Error group for handling the synchronizer calls only.
|
||||
eg, ctx := errgroup.WithContext(stream.Context())
|
||||
syncStream, err := s.synchronizer.synchronize(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mmfCtx, cancelMmfs := context.WithCancel(stream.Context())
|
||||
// The mmf must be canceled if the synchronizer call fails (which will
|
||||
// cancel the context from the error group). However the synchronizer call
|
||||
// is NOT dependent on the mmf call.
|
||||
mmfCtx, cancelMmfs := contextcause.WithCancelCause(ctx)
|
||||
// Closed when mmfs should start.
|
||||
startMmfs := make(chan struct{})
|
||||
proposals := make(chan *pb.Match)
|
||||
m := &sync.Map{}
|
||||
|
||||
synchronizerWait := omerror.WaitOnErrors(logger, func() error {
|
||||
return synchronizeSend(stream.Context(), syncStream, m, proposals)
|
||||
}, func() error {
|
||||
return synchronizeRecv(syncStream, m, stream, startMmfs, cancelMmfs)
|
||||
eg.Go(func() error {
|
||||
return synchronizeSend(ctx, syncStream, m, proposals)
|
||||
})
|
||||
eg.Go(func() error {
|
||||
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs)
|
||||
})
|
||||
|
||||
mmfWait := omerror.WaitOnErrors(logger, func() error {
|
||||
select {
|
||||
case <-mmfCtx.Done():
|
||||
return fmt.Errorf("Mmf was never started")
|
||||
case <-startMmfs:
|
||||
}
|
||||
var mmfErr error
|
||||
select {
|
||||
case <-mmfCtx.Done():
|
||||
mmfErr = fmt.Errorf("mmf was never started")
|
||||
case <-startMmfs:
|
||||
mmfErr = callMmf(mmfCtx, s.cc, req, proposals)
|
||||
}
|
||||
|
||||
return callMmf(mmfCtx, s.cc, req, proposals)
|
||||
})
|
||||
|
||||
syncErr := synchronizerWait()
|
||||
// Fetch Matches should never block on just the match function.
|
||||
// Must cancel mmfs after synchronizer is done and before checking mmf error
|
||||
// because the synchronizer call could fail while the mmf call blocks.
|
||||
cancelMmfs()
|
||||
mmfErr := mmfWait()
|
||||
syncErr := eg.Wait()
|
||||
|
||||
// TODO: Send mmf error in FetchSummary instead of erroring call.
|
||||
if syncErr != nil || mmfErr != nil {
|
||||
@ -109,7 +108,7 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
|
||||
}).Error("error(s) in FetchMatches call.")
|
||||
|
||||
return fmt.Errorf(
|
||||
"Error(s) in FetchMatches call. syncErr=[%s], mmfErr=[%s]",
|
||||
"error(s) in FetchMatches call. syncErr=[%s], mmfErr=[%s]",
|
||||
syncErr,
|
||||
mmfErr,
|
||||
)
|
||||
@ -128,8 +127,10 @@ sendProposals:
|
||||
if !ok {
|
||||
break sendProposals
|
||||
}
|
||||
m.Store(p.GetMatchId(), p)
|
||||
telemetry.RecordUnitMeasurement(ctx, mMatchesSentToEvaluation)
|
||||
_, loaded := m.LoadOrStore(p.GetMatchId(), p)
|
||||
if loaded {
|
||||
return fmt.Errorf("MatchMakingFunction returned same match_id twice: \"%s\"", p.GetMatchId())
|
||||
}
|
||||
err := syncStream.Send(&ipb.SynchronizeRequest{Proposal: p})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending proposal to synchronizer: %w", err)
|
||||
@ -144,7 +145,7 @@ sendProposals:
|
||||
return nil
|
||||
}
|
||||
|
||||
func synchronizeRecv(syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs context.CancelFunc) error {
|
||||
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc) error {
|
||||
var startMmfsOnce sync.Once
|
||||
|
||||
for {
|
||||
@ -163,12 +164,17 @@ func synchronizeRecv(syncStream synchronizerStream, m *sync.Map, stream pb.Backe
|
||||
}
|
||||
|
||||
if resp.CancelMmfs {
|
||||
cancelMmfs()
|
||||
cancelMmfs(errors.New("match function ran longer than proposal window, canceling"))
|
||||
}
|
||||
|
||||
if match, ok := m.Load(resp.GetMatchId()); ok {
|
||||
telemetry.RecordUnitMeasurement(stream.Context(), mMatchesFetched)
|
||||
err = stream.Send(&pb.FetchMatchesResponse{Match: match.(*pb.Match)})
|
||||
if v, ok := m.Load(resp.GetMatchId()); ok {
|
||||
match, ok := v.(*pb.Match)
|
||||
if !ok {
|
||||
return fmt.Errorf("error casting sync map value into *pb.Match: %w", err)
|
||||
}
|
||||
stats.Record(ctx, totalBytesPerMatch.M(int64(proto.Size(match))))
|
||||
stats.Record(ctx, ticketsPerMatch.M(int64(len(match.GetTickets()))))
|
||||
err = stream.Send(&pb.FetchMatchesResponse{Match: match})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error sending match to caller of backend: %w", err)
|
||||
}
|
||||
@ -206,6 +212,10 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
|
||||
stream, err := client.Run(ctx, &pb.RunRequest{Profile: profile})
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed to run match function for profile")
|
||||
if ctx.Err() != nil {
|
||||
// gRPC likes to suppress the context's error, so stop that.
|
||||
return ctx.Err()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@ -216,6 +226,10 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
|
||||
}
|
||||
if err != nil {
|
||||
logger.Errorf("%v.Run() error, %v\n", client, err)
|
||||
if ctx.Err() != nil {
|
||||
// gRPC likes to suppress the context's error, so stop that.
|
||||
return ctx.Err()
|
||||
}
|
||||
return err
|
||||
}
|
||||
select {
|
||||
@ -298,29 +312,49 @@ func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTick
|
||||
return nil, err
|
||||
}
|
||||
|
||||
telemetry.RecordNUnitMeasurement(ctx, mTicketsReleased, int64(len(req.TicketIds)))
|
||||
stats.Record(ctx, ticketsReleased.M(int64(len(req.TicketIds))))
|
||||
return &pb.ReleaseTicketsResponse{}, nil
|
||||
}
|
||||
|
||||
func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseAllTicketsRequest) (*pb.ReleaseAllTicketsResponse, error) {
|
||||
err := s.store.ReleaseAllTickets(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.ReleaseAllTicketsResponse{}, nil
|
||||
}
|
||||
|
||||
// AssignTickets overwrites the Assignment field of the input TicketIds.
|
||||
func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
|
||||
err := doAssignTickets(ctx, req, s.store)
|
||||
resp, err := doAssignTickets(ctx, req, s.store)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed to update assignments for requested tickets")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
telemetry.RecordNUnitMeasurement(ctx, mTicketsAssigned, int64(len(req.TicketIds)))
|
||||
return &pb.AssignTicketsResponse{}, nil
|
||||
numIds := 0
|
||||
for _, ag := range req.Assignments {
|
||||
numIds += len(ag.TicketIds)
|
||||
}
|
||||
|
||||
stats.Record(ctx, ticketsAssigned.M(int64(numIds)))
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) error {
|
||||
err := store.UpdateAssignments(ctx, req.GetTicketIds(), req.GetAssignment())
|
||||
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
|
||||
resp, err := store.UpdateAssignments(ctx, req)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed to update assignments")
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
for _, id := range req.GetTicketIds() {
|
||||
|
||||
ids := []string{}
|
||||
|
||||
for _, ag := range req.Assignments {
|
||||
ids = append(ids, ag.TicketIds...)
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
err = store.DeindexTicket(ctx, id)
|
||||
// Try to deindex all input tickets. Log without returning an error if the deindexing operation failed.
|
||||
// TODO: consider retrying the deindex operation
|
||||
@ -329,13 +363,13 @@ func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store st
|
||||
}
|
||||
}
|
||||
|
||||
if err = store.DeleteTicketsFromIgnoreList(ctx, req.GetTicketIds()); err != nil {
|
||||
if err = store.DeleteTicketsFromIgnoreList(ctx, ids); err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"ticket_ids": req.GetTicketIds(),
|
||||
"ticket_ids": ids,
|
||||
}).Error(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func doReleasetickets(ctx context.Context, req *pb.ReleaseTicketsRequest, store statestore.Service) error {
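doAssignTickets now takes the whole AssignTicketsRequest, which groups ticket IDs with the assignment to apply to them, and returns a response instead of a bare error. A hedged client-side sketch of the new request shape (the AssignmentGroup message and field names follow the v1.0 API; the connection string and ticket IDs are placeholders):

```go
package director

import (
	"context"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

// assignMatch pushes one assignment group to the backend service.
// conn is an established *grpc.ClientConn to open-match-backend.
func assignMatch(ctx context.Context, conn *grpc.ClientConn, ticketIDs []string, addr string) error {
	be := pb.NewBackendServiceClient(conn)
	_, err := be.AssignTickets(ctx, &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{{
			TicketIds:  ticketIDs,
			Assignment: &pb.Assignment{Connection: addr},
		}},
	})
	return err
}
```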
|
||||
|
@ -1,309 +0,0 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package backend
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
|
||||
utilTesting "open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
func TestDoReleaseTickets(t *testing.T) {
|
||||
fakeProperty := "test-property"
|
||||
fakeTickets := []*pb.Ticket{
|
||||
{
|
||||
Id: "1",
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
fakeProperty: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: "2",
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
fakeProperty: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: "3",
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
fakeProperty: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
preAction func(context.Context, context.CancelFunc, statestore.Service, *pb.Pool)
|
||||
req *pb.ReleaseTicketsRequest
|
||||
wantCode codes.Code
|
||||
pool *pb.Pool
|
||||
expectTickets []string
|
||||
}{
|
||||
{
|
||||
description: "expect unavailable code since context is canceled before being called",
|
||||
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service, pool *pb.Pool) {
|
||||
cancel()
|
||||
},
|
||||
req: &pb.ReleaseTicketsRequest{
|
||||
TicketIds: []string{"1"},
|
||||
},
|
||||
wantCode: codes.Unavailable,
|
||||
},
|
||||
{
|
||||
description: "expect ok code when submitted list is empty",
|
||||
pool: &pb.Pool{DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}}},
|
||||
expectTickets: []string{"3"},
|
||||
req: &pb.ReleaseTicketsRequest{
|
||||
TicketIds: []string{},
|
||||
},
|
||||
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service, pool *pb.Pool) {
|
||||
for _, fakeTicket := range fakeTickets {
|
||||
store.CreateTicket(ctx, fakeTicket)
|
||||
store.IndexTicket(ctx, fakeTicket)
|
||||
}
|
||||
|
||||
// Make sure tickets are correctly indexed.
|
||||
var wantFilteredTickets []*pb.Ticket
|
||||
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
|
||||
wantFilteredTickets = filterTickets
|
||||
return nil
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
|
||||
|
||||
// Ignore a few tickets
|
||||
err = store.AddTicketsToIgnoreList(ctx, []string{"1", "2"})
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Make sure it was properly ignored
|
||||
var ignoredFilterTickets []*pb.Ticket
|
||||
err = store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
|
||||
ignoredFilterTickets = filterTickets
|
||||
return nil
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(fakeTickets)-2, len(ignoredFilterTickets))
|
||||
},
|
||||
wantCode: codes.OK,
|
||||
},
|
||||
{
|
||||
description: "expect ok code",
|
||||
pool: &pb.Pool{DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}}},
|
||||
wantCode: codes.OK,
|
||||
expectTickets: []string{"1", "2"},
|
||||
req: &pb.ReleaseTicketsRequest{
|
||||
TicketIds: []string{"1", "2"},
|
||||
},
|
||||
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service, pool *pb.Pool) {
|
||||
for _, fakeTicket := range fakeTickets {
|
||||
store.CreateTicket(ctx, fakeTicket)
|
||||
store.IndexTicket(ctx, fakeTicket)
|
||||
}
|
||||
// Make sure tickets are correctly indexed.
|
||||
var wantFilteredTickets []*pb.Ticket
|
||||
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
|
||||
wantFilteredTickets = filterTickets
|
||||
return nil
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
|
||||
|
||||
// Ignore all the tickets
|
||||
err = store.AddTicketsToIgnoreList(ctx, []string{"1", "2", "3"})
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Make sure it was properly ignored
|
||||
var ignoredFilterTickets []*pb.Ticket
|
||||
err = store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
|
||||
ignoredFilterTickets = filterTickets
|
||||
return nil
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(fakeTickets)-3, len(ignoredFilterTickets))
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
|
||||
cfg := viper.New()
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
|
||||
test.preAction(ctx, cancel, store, test.pool)
|
||||
|
||||
err := doReleasetickets(ctx, test.req, store)
|
||||
assert.Equal(t, test.wantCode, status.Convert(err).Code())
|
||||
|
||||
if err == nil {
|
||||
// Make sure that the expected tickets are available for query
|
||||
var filteredTickets []*pb.Ticket
|
||||
err = store.FilterTickets(ctx, test.pool, 10, func(filterTickets []*pb.Ticket) error {
|
||||
filteredTickets = filterTickets
|
||||
return nil
|
||||
})
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(filteredTickets), len(test.expectTickets))
|
||||
|
||||
for _, ticket := range filteredTickets {
|
||||
assert.Contains(t, test.expectTickets, ticket.GetId())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoAssignTickets(t *testing.T) {
|
||||
fakeProperty := "test-property"
|
||||
fakeTickets := []*pb.Ticket{
|
||||
{
|
||||
Id: "1",
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
fakeProperty: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Id: "2",
|
||||
SearchFields: &pb.SearchFields{
|
||||
DoubleArgs: map[string]float64{
|
||||
fakeProperty: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
preAction func(context.Context, context.CancelFunc, statestore.Service)
|
||||
req *pb.AssignTicketsRequest
|
||||
wantCode codes.Code
|
||||
wantAssignment *pb.Assignment
|
||||
}{
|
||||
{
|
||||
description: "expect unavailable code since context is canceled before being called",
|
||||
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
|
||||
cancel()
|
||||
},
|
||||
req: &pb.AssignTicketsRequest{
|
||||
TicketIds: []string{"1"},
|
||||
Assignment: &pb.Assignment{},
|
||||
},
|
||||
wantCode: codes.Unavailable,
|
||||
},
|
||||
{
|
||||
description: "expect invalid argument code since assignment is nil",
|
||||
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
|
||||
cancel()
|
||||
},
|
||||
req: &pb.AssignTicketsRequest{},
|
||||
wantCode: codes.InvalidArgument,
|
||||
},
|
||||
{
|
||||
description: "expect not found code since ticket does not exist",
|
||||
preAction: func(_ context.Context, _ context.CancelFunc, _ statestore.Service) {},
|
||||
req: &pb.AssignTicketsRequest{
|
||||
TicketIds: []string{"1", "2"},
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: "123",
|
||||
},
|
||||
},
|
||||
wantCode: codes.NotFound,
|
||||
},
|
||||
{
|
||||
description: "expect ok code",
|
||||
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service) {
|
||||
for _, fakeTicket := range fakeTickets {
|
||||
store.CreateTicket(ctx, fakeTicket)
|
||||
store.IndexTicket(ctx, fakeTicket)
|
||||
}
|
||||
// Make sure tickets are correctly indexed.
|
||||
var wantFilteredTickets []*pb.Ticket
|
||||
pool := &pb.Pool{
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}},
|
||||
}
|
||||
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
|
||||
wantFilteredTickets = filterTickets
|
||||
return nil
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
|
||||
},
|
||||
req: &pb.AssignTicketsRequest{
|
||||
TicketIds: []string{"1", "2"},
|
||||
Assignment: &pb.Assignment{
|
||||
Connection: "123",
|
||||
},
|
||||
},
|
||||
wantCode: codes.OK,
|
||||
wantAssignment: &pb.Assignment{
|
||||
Connection: "123",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
|
||||
cfg := viper.New()
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
|
||||
test.preAction(ctx, cancel, store)
|
||||
|
||||
err := doAssignTickets(ctx, test.req, store)
|
||||
|
||||
assert.Equal(t, test.wantCode, status.Convert(err).Code())
|
||||
|
||||
if err == nil {
|
||||
for _, id := range test.req.GetTicketIds() {
|
||||
ticket, err := store.GetTicket(ctx, id)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.wantAssignment, ticket.GetAssignment())
|
||||
}
|
||||
|
||||
// Make sure tickets are deindexed after assignment
|
||||
var wantFilteredTickets []*pb.Ticket
|
||||
pool := &pb.Pool{
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 2}},
|
||||
}
|
||||
store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
|
||||
wantFilteredTickets = filterTickets
|
||||
return nil
|
||||
})
|
||||
assert.Nil(t, wantFilteredTickets)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TODOs: add unit tests to doFetchMatchesFilterSkiplistIds and doFetchMatchesAddSkiplistIds
|
@ -12,15 +12,21 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package evaluate
|
||||
// Package defaulteval provides a simple score-based evaluator.
|
||||
package defaulteval
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/sirupsen/logrus"
|
||||
"open-match.dev/open-match/internal/testing/evaluator"
|
||||
"go.opencensus.io/stats/view"
|
||||
"open-match.dev/open-match/internal/app/evaluator"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
@ -29,6 +35,14 @@ var (
|
||||
"app": "evaluator",
|
||||
"component": "evaluator.default",
|
||||
})
|
||||
|
||||
collidedMatchesPerEvaluate = stats.Int64("open-match.dev/defaulteval/collided_matches_per_call", "Number of collided matches per default evaluator call", stats.UnitDimensionless)
|
||||
collidedMatchesPerEvaluateView = &view.View{
|
||||
Measure: collidedMatchesPerEvaluate,
|
||||
Name: "open-match.dev/defaulteval/collided_matches_per_call",
|
||||
Description: "Number of collided matches per default evaluator call",
|
||||
Aggregation: view.Sum(),
|
||||
}
|
||||
)
|
||||
|
||||
type matchInp struct {
|
||||
@ -36,13 +50,22 @@ type matchInp struct {
|
||||
inp *pb.DefaultEvaluationCriteria
|
||||
}
|
||||
|
||||
// Evaluate is where your custom evaluation logic lives.
|
||||
// This sample evaluator sorts and deduplicates the input matches.
|
||||
func Evaluate(p *evaluator.Params) ([]string, error) {
|
||||
matches := make([]*matchInp, 0, len(p.Matches))
|
||||
// BindService defines the initialization steps for this evaluator.
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
if err := evaluator.BindServiceFor(evaluate)(p, b); err != nil {
|
||||
return err
|
||||
}
|
||||
b.RegisterViews(collidedMatchesPerEvaluateView)
|
||||
return nil
|
||||
}
|
||||
|
||||
// evaluate sorts the matches by DefaultEvaluationCriteria.Score (optional),
|
||||
// then returns matches which don't collide with previously returned matches.
|
||||
func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
|
||||
matches := make([]*matchInp, 0)
|
||||
nilEvlautionInputs := 0
|
||||
|
||||
for _, m := range p.Matches {
|
||||
for m := range in {
|
||||
// Evaluation criteria is optional, but sort it lower than any matches which
|
||||
// provided criteria.
|
||||
inp := &pb.DefaultEvaluationCriteria{
|
||||
@ -83,7 +106,13 @@ func Evaluate(p *evaluator.Params) ([]string, error) {
|
||||
d.maybeAdd(m)
|
||||
}
|
||||
|
||||
return d.resultIDs, nil
|
||||
stats.Record(context.Background(), collidedMatchesPerEvaluate.M(int64(len(matches)-len(d.resultIDs))))
|
||||
|
||||
for _, id := range d.resultIDs {
|
||||
out <- id
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type collidingMatch struct {
|
@ -12,16 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package evaluate
|
||||
package defaulteval
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"open-match.dev/open-match/internal/testing/evaluator"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
@ -114,9 +114,21 @@ func TestEvaluate(t *testing.T) {
|
||||
test := test
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
gotMatchIDs, err := Evaluate(&evaluator.Params{Matches: test.testMatches})
|
||||
in := make(chan *pb.Match, 10)
|
||||
out := make(chan string, 10)
|
||||
for _, m := range test.testMatches {
|
||||
in <- m
|
||||
}
|
||||
close(in)
|
||||
|
||||
err := evaluate(context.Background(), in, out)
|
||||
assert.Nil(t, err)
|
||||
|
||||
gotMatchIDs := []string{}
|
||||
close(out)
|
||||
for id := range out {
|
||||
gotMatchIDs = append(gotMatchIDs, id)
|
||||
}
|
||||
assert.Equal(t, len(test.wantMatchIDs), len(gotMatchIDs))
|
||||
|
||||
for _, mID := range gotMatchIDs {
|
57
internal/app/evaluator/evaluator.go
Normal file
57
internal/app/evaluator/evaluator.go
Normal file
@ -0,0 +1,57 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package evaluator provides the Evaluator service for Open Match golang harness.
|
||||
package evaluator
|
||||
|
||||
import (
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
matchesPerEvaluateRequest = stats.Int64("open-match.dev/evaluator/matches_per_request", "Number of matches sent to the evaluator per request", stats.UnitDimensionless)
|
||||
matchesPerEvaluateResponse = stats.Int64("open-match.dev/evaluator/matches_per_response", "Number of matches returned by the evaluator per response", stats.UnitDimensionless)
|
||||
|
||||
matchesPerEvaluateRequestView = &view.View{
|
||||
Measure: matchesPerEvaluateRequest,
|
||||
Name: "open-match.dev/evaluator/matches_per_request",
|
||||
Description: "Number of matches sent to the evaluator per request",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
matchesPerEvaluateResponseView = &view.View{
|
||||
Measure: matchesPerEvaluateResponse,
|
||||
Name: "open-match.dev/evaluator/matches_per_response",
|
||||
Description: "Number of matches sent to the evaluator per response",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
)
|
||||
|
||||
// BindServiceFor creates the evaluator service and binds it to the serving harness.
|
||||
func BindServiceFor(eval Evaluator) appmain.Bind {
|
||||
return func(p *appmain.Params, b *appmain.Bindings) error {
|
||||
b.AddHandleFunc(func(s *grpc.Server) {
|
||||
pb.RegisterEvaluatorServer(s, &evaluatorService{eval})
|
||||
}, pb.RegisterEvaluatorHandlerFromEndpoint)
|
||||
b.RegisterViews(
|
||||
matchesPerEvaluateRequestView,
|
||||
matchesPerEvaluateResponseView,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
}
|
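The harness's Evaluator contract is now channel based: the evaluator reads matches from in until the channel is closed and writes accepted match IDs to out, while the harness owns the gRPC stream and closes out. A minimal accept-everything sketch wired through BindServiceFor; it is purely illustrative (a real evaluator should deduplicate colliding tickets like defaulteval does), and because these are internal packages it only compiles inside the open-match module:

```go
package myevaluator

import (
	"context"

	"open-match.dev/open-match/internal/app/evaluator"
	"open-match.dev/open-match/internal/appmain"
	"open-match.dev/open-match/pkg/pb"
)

// acceptAll approves every proposal it receives.
func acceptAll(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
	for m := range in {
		select {
		case out <- m.GetMatchId():
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}

// BindService plugs the evaluator into the appmain harness.
func BindService(p *appmain.Params, b *appmain.Bindings) error {
	return evaluator.BindServiceFor(acceptAll)(p, b)
}
```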
@ -16,13 +16,13 @@
|
||||
package evaluator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/stats"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -35,7 +35,7 @@ var (
|
||||
// Evaluator is the function signature for the Evaluator to be implemented by
|
||||
// the user. The harness will pass the Matches to evaluate to the Evaluator
|
||||
// and the Evaluator will return an accepted list of Matches.
|
||||
type Evaluator func(*Params) ([]string, error)
|
||||
type Evaluator func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
|
||||
|
||||
// evaluatorService implements pb.EvaluatorServer, the server generated by
|
||||
// compiling the protobuf, by fulfilling the pb.EvaluatorServer interface.
|
||||
@ -43,51 +43,60 @@ type evaluatorService struct {
|
||||
evaluate Evaluator
|
||||
}
|
||||
|
||||
// Params is the parameters to be passed by the harness to the evaluator.
|
||||
// - logger:
|
||||
// A logger used to generate error/debug logs
|
||||
// - Matches
|
||||
// Matches to be evaluated
|
||||
type Params struct {
|
||||
Logger *logrus.Entry
|
||||
Matches []*pb.Match
|
||||
}
|
||||
|
||||
// Evaluate is this harness's implementation of the gRPC call defined in
|
||||
// api/evaluator.proto.
|
||||
func (s *evaluatorService) Evaluate(stream pb.Evaluator_EvaluateServer) error {
|
||||
var matches = []*pb.Match{}
|
||||
for {
|
||||
req, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
matches = append(matches, req.GetMatch())
|
||||
}
|
||||
g, ctx := errgroup.WithContext(stream.Context())
|
||||
|
||||
// Run the customized evaluator!
|
||||
results, err := s.evaluate(&Params{
|
||||
Logger: logrus.WithFields(logrus.Fields{
|
||||
"app": "openmatch",
|
||||
"component": "evaluator.implementation",
|
||||
}),
|
||||
Matches: matches,
|
||||
in := make(chan *pb.Match)
|
||||
out := make(chan string)
|
||||
|
||||
g.Go(func() error {
|
||||
defer close(in)
|
||||
count := 0
|
||||
for {
|
||||
req, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case in <- req.Match:
|
||||
count++
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
stats.Record(ctx, matchesPerEvaluateRequest.M(int64(count)))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
g.Go(func() error {
|
||||
defer close(out)
|
||||
return s.evaluate(ctx, in, out)
|
||||
})
|
||||
g.Go(func() error {
|
||||
defer func() {
|
||||
for range out {
|
||||
}
|
||||
}()
|
||||
|
||||
for _, result := range results {
|
||||
if err := stream.Send(&pb.EvaluateResponse{MatchId: result}); err != nil {
|
||||
return err
|
||||
count := 0
|
||||
for id := range out {
|
||||
err := stream.Send(&pb.EvaluateResponse{MatchId: id})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count++
|
||||
}
|
||||
}
|
||||
stats.Record(ctx, matchesPerEvaluateResponse.M(int64(count)))
|
||||
return nil
|
||||
})
|
||||
|
||||
logger.WithFields(logrus.Fields{
|
||||
"results": results,
|
||||
}).Debug("matches accepted by the evaluator")
|
||||
return nil
|
||||
err := g.Wait()
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Error in evaluator.Evaluate")
|
||||
}
|
||||
return err
|
||||
}
|
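On the wire, Evaluate is a bidirectional stream: the caller (normally the synchronizer) sends one EvaluateRequest per proposal, half-closes, and then reads back the accepted match IDs. A hedged sketch of that client-side flow (the helper name is made up; conn and the proposals slice are assumed to exist):

```go
package synchronizer

import (
	"context"
	"io"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

// evaluateProposals streams proposals to the evaluator and returns the IDs it accepted.
func evaluateProposals(ctx context.Context, conn *grpc.ClientConn, proposals []*pb.Match) ([]string, error) {
	stream, err := pb.NewEvaluatorClient(conn).Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	for _, m := range proposals {
		if err := stream.Send(&pb.EvaluateRequest{Match: m}); err != nil {
			return nil, err
		}
	}
	if err := stream.CloseSend(); err != nil {
		return nil, err
	}
	var accepted []string
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return accepted, nil
		}
		if err != nil {
			return nil, err
		}
		accepted = append(accepted, resp.GetMatchId())
	}
}
```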
@ -15,24 +15,47 @@
|
||||
package frontend
|
||||
|
||||
import (
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
|
||||
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "SearchFields per ticket", stats.UnitDimensionless)
|
||||
|
||||
totalBytesPerTicketView = &view.View{
|
||||
Measure: totalBytesPerTicket,
|
||||
Name: "open-match.dev/frontend/total_bytes_per_ticket",
|
||||
Description: "Total bytes per ticket",
|
||||
Aggregation: telemetry.DefaultBytesDistribution,
|
||||
}
|
||||
searchFieldsPerTicketView = &view.View{
|
||||
Measure: searchFieldsPerTicket,
|
||||
Name: "open-match.dev/frontend/searchfields_per_ticket",
|
||||
Description: "SearchFields per ticket",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
)
|
||||
|
||||
// BindService creates the frontend service and binds it to the serving harness.
|
||||
func BindService(p *rpc.ServerParams, cfg config.View) error {
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
service := &frontendService{
|
||||
cfg: cfg,
|
||||
store: statestore.New(cfg),
|
||||
cfg: p.Config(),
|
||||
store: statestore.New(p.Config()),
|
||||
}
|
||||
|
||||
p.AddHealthCheckFunc(service.store.HealthCheck)
|
||||
p.AddHandleFunc(func(s *grpc.Server) {
|
||||
b.AddHealthCheckFunc(service.store.HealthCheck)
|
||||
b.AddHandleFunc(func(s *grpc.Server) {
|
||||
pb.RegisterFrontendServiceServer(s, service)
|
||||
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
|
||||
|
||||
b.RegisterViews(
|
||||
totalBytesPerTicketView,
|
||||
searchFieldsPerTicketView,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
@ -18,14 +18,16 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/rs/xid"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
@ -41,26 +43,28 @@ var (
|
||||
"app": "openmatch",
|
||||
"component": "app.frontend",
|
||||
})
|
||||
mTicketsCreated = telemetry.Counter("frontend/tickets_created", "tickets created")
|
||||
mTicketsDeleted = telemetry.Counter("frontend/tickets_deleted", "tickets deleted")
|
||||
mTicketsRetrieved = telemetry.Counter("frontend/tickets_retrieved", "tickets retrieved")
|
||||
mTicketAssignmentsRetrieved = telemetry.Counter("frontend/tickets_assignments_retrieved", "ticket assignments retrieved")
|
||||
)
|
||||
|
||||
// CreateTicket assigns a unique TicketId to the input Ticket and records it in state storage.
|
||||
// A ticket is considered ready for matchmaking once it is created.
|
||||
// - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
|
||||
// - If SearchFields exist in a Ticket, CreateTicket will also index these fields so that the ticket can be queried with the query.QueryTickets function.
|
||||
func (s *frontendService) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.CreateTicketResponse, error) {
|
||||
func (s *frontendService) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.Ticket, error) {
|
||||
// Perform input validation.
|
||||
if req.GetTicket() == nil {
|
||||
if req.Ticket == nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, ".ticket is required")
|
||||
}
|
||||
if req.Ticket.Assignment != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "tickets cannot be created with an assignment")
|
||||
}
|
||||
if req.Ticket.CreateTime != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "tickets cannot be created with create time set")
|
||||
}
|
||||
|
||||
return doCreateTicket(ctx, req, s.store)
|
||||
}
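As a usage sketch (hypothetical client code, not part of this change), the new signature lets callers read the generated ID straight off the returned Ticket:

// Hypothetical caller of the new CreateTicket API, which returns *pb.Ticket directly.
func createDemoTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, error) {
	t, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{
		Ticket: &pb.Ticket{
			SearchFields: &pb.SearchFields{
				Tags: []string{"mode.demo"}, // example tag, assumed
			},
		},
	})
	if err != nil {
		return "", err
	}
	return t.GetId(), nil
}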
|
||||
|
||||
func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store statestore.Service) (*pb.CreateTicketResponse, error) {
|
||||
func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store statestore.Service) (*pb.Ticket, error) {
|
||||
// Generate a ticket id and create a Ticket in state storage
|
||||
ticket, ok := proto.Clone(req.Ticket).(*pb.Ticket)
|
||||
if !ok {
|
||||
@ -68,6 +72,15 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
|
||||
}
|
||||
|
||||
ticket.Id = xid.New().String()
|
||||
ticket.CreateTime = ptypes.TimestampNow()
|
||||
|
||||
sfCount := 0
|
||||
sfCount += len(ticket.GetSearchFields().GetDoubleArgs())
|
||||
sfCount += len(ticket.GetSearchFields().GetStringArgs())
|
||||
sfCount += len(ticket.GetSearchFields().GetTags())
|
||||
stats.Record(ctx, searchFieldsPerTicket.M(int64(sfCount)))
|
||||
stats.Record(ctx, totalBytesPerTicket.M(int64(proto.Size(ticket))))
|
||||
|
||||
err := store.CreateTicket(ctx, ticket)
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
@ -86,21 +99,19 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
|
||||
return nil, err
|
||||
}
|
||||
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
|
||||
return &pb.CreateTicketResponse{Ticket: ticket}, nil
|
||||
return ticket, nil
|
||||
}
|
||||
|
||||
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
|
||||
// The client must delete the Ticket when finished matchmaking with it.
|
||||
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
|
||||
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
|
||||
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*pb.DeleteTicketResponse, error) {
|
||||
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*empty.Empty, error) {
|
||||
err := doDeleteTicket(ctx, req.GetTicketId(), s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
|
||||
return &pb.DeleteTicketResponse{}, nil
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
|
||||
func doDeleteTicket(ctx context.Context, id string, store statestore.Service) error {
|
||||
@ -141,7 +152,6 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
|
||||
|
||||
// GetTicket gets the Ticket associated with the specified TicketId.
|
||||
func (s *frontendService) GetTicket(ctx context.Context, req *pb.GetTicketRequest) (*pb.Ticket, error) {
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketsRetrieved)
|
||||
return doGetTickets(ctx, req.GetTicketId(), s.store)
|
||||
}
|
||||
|
||||
@ -158,9 +168,9 @@ func doGetTickets(ctx context.Context, id string, store statestore.Service) (*pb
|
||||
return ticket, nil
|
||||
}
|
||||
|
||||
// GetAssignments stream back Assignment of the specified TicketId if it is updated.
|
||||
// WatchAssignments streams back the Assignment of the specified TicketId whenever it is updated.
|
||||
// - If the Assignment is not updated, WatchAssignments will retry using the configured backoff strategy.
|
||||
func (s *frontendService) GetAssignments(req *pb.GetAssignmentsRequest, stream pb.FrontendService_GetAssignmentsServer) error {
|
||||
func (s *frontendService) WatchAssignments(req *pb.WatchAssignmentsRequest, stream pb.FrontendService_WatchAssignmentsServer) error {
|
||||
ctx := stream.Context()
|
||||
for {
|
||||
select {
|
||||
@ -168,15 +178,14 @@ func (s *frontendService) GetAssignments(req *pb.GetAssignmentsRequest, stream p
|
||||
return ctx.Err()
|
||||
default:
|
||||
sender := func(assignment *pb.Assignment) error {
|
||||
telemetry.RecordUnitMeasurement(ctx, mTicketAssignmentsRetrieved)
|
||||
return stream.Send(&pb.GetAssignmentsResponse{Assignment: assignment})
|
||||
return stream.Send(&pb.WatchAssignmentsResponse{Assignment: assignment})
|
||||
}
|
||||
return doGetAssignments(ctx, req.GetTicketId(), sender, s.store)
|
||||
return doWatchAssignments(ctx, req.GetTicketId(), sender, s.store)
|
||||
}
|
||||
}
|
||||
}
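A sketch of the client side of this stream (hypothetical; the check on Assignment.Connection is an assumption about how callers typically detect a completed assignment):

// Hypothetical consumer that blocks until the ticket receives an assignment.
func waitForAssignment(ctx context.Context, fe pb.FrontendServiceClient, ticketID string) (*pb.Assignment, error) {
	stream, err := fe.WatchAssignments(ctx, &pb.WatchAssignmentsRequest{TicketId: ticketID})
	if err != nil {
		return nil, err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return nil, err
		}
		if a := resp.GetAssignment(); a.GetConnection() != "" {
			return a, nil
		}
	}
}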
|
||||
|
||||
func doGetAssignments(ctx context.Context, id string, sender func(*pb.Assignment) error, store statestore.Service) error {
|
||||
func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignment) error, store statestore.Service) error {
|
||||
var currAssignment *pb.Assignment
|
||||
var ok bool
|
||||
callback := func(assignment *pb.Assignment) error {
|
||||
|
@ -68,6 +68,7 @@ func TestDoCreateTickets(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
@ -78,16 +79,16 @@ func TestDoCreateTickets(t *testing.T) {
|
||||
res, err := doCreateTicket(ctx, &pb.CreateTicketRequest{Ticket: test.ticket}, store)
|
||||
assert.Equal(t, test.wantCode, status.Convert(err).Code())
|
||||
if err == nil {
|
||||
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetTicket().GetId())
|
||||
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetId())
|
||||
assert.True(t, matched)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.Ticket.SearchFields.DoubleArgs["test-arg"])
|
||||
assert.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoGetAssignments(t *testing.T) {
|
||||
func TestDoWatchAssignments(t *testing.T) {
|
||||
testTicket := &pb.Ticket{
|
||||
Id: "test-id",
|
||||
}
|
||||
@ -122,7 +123,15 @@ func TestDoGetAssignments(t *testing.T) {
|
||||
go func(wg *sync.WaitGroup) {
|
||||
for i := 0; i < len(wantAssignments); i++ {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
assert.Nil(t, store.UpdateAssignments(ctx, []string{testTicket.GetId()}, wantAssignments[i]))
|
||||
_, err := store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
|
||||
Assignments: []*pb.AssignmentGroup{
|
||||
{
|
||||
TicketIds: []string{testTicket.GetId()},
|
||||
Assignment: wantAssignments[i],
|
||||
},
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
wg.Done()
|
||||
}
|
||||
}(wg)
|
||||
@ -145,7 +154,7 @@ func TestDoGetAssignments(t *testing.T) {
|
||||
gotAssignments := []*pb.Assignment{}
|
||||
|
||||
test.preAction(ctx, t, store, test.wantAssignments, &wg)
|
||||
err := doGetAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
|
||||
err := doWatchAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
|
||||
assert.Equal(t, test.wantCode, status.Convert(err).Code())
|
||||
|
||||
wg.Wait()
|
||||
@ -193,6 +202,7 @@ func TestDoDeleteTicket(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
|
||||
@ -246,6 +256,7 @@ func TestDoGetTicket(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
|
||||
|
@ -19,25 +19,24 @@ import (
|
||||
"open-match.dev/open-match/internal/app/frontend"
|
||||
"open-match.dev/open-match/internal/app/query"
|
||||
"open-match.dev/open-match/internal/app/synchronizer"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
)
|
||||
|
||||
// BindService binds the minimatch services to the server Params.
|
||||
func BindService(p *rpc.ServerParams, cfg config.View) error {
|
||||
if err := backend.BindService(p, cfg); err != nil {
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
if err := backend.BindService(p, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := frontend.BindService(p, cfg); err != nil {
|
||||
if err := frontend.BindService(p, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := query.BindService(p, cfg); err != nil {
|
||||
if err := query.BindService(p, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := synchronizer.BindService(p, cfg); err != nil {
|
||||
if err := synchronizer.BindService(p, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -15,25 +15,76 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
|
||||
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of tickets query service cached", stats.UnitDimensionless)
|
||||
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
|
||||
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
|
||||
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
|
||||
|
||||
ticketsPerQueryView = &view.View{
|
||||
Measure: ticketsPerQuery,
|
||||
Name: "open-match.dev/query/tickets_per_query",
|
||||
Description: "Tickets per query",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
cacheTotalItemsView = &view.View{
|
||||
Measure: cacheTotalItems,
|
||||
Name: "open-match.dev/query/total_cached_items",
|
||||
Description: "Total number of cached tickets",
|
||||
Aggregation: view.LastValue(),
|
||||
}
|
||||
cacheFetchedItemsView = &view.View{
|
||||
Measure: cacheFetchedItems,
|
||||
Name: "open-match.dev/query/total_fetched_items",
|
||||
Description: "Total number of fetched tickets",
|
||||
Aggregation: view.Sum(),
|
||||
}
|
||||
cacheUpdateView = &view.View{
|
||||
Measure: cacheWaitingQueries,
|
||||
Name: "open-match.dev/query/cache_updates",
|
||||
Description: "Number of query cache updates in total",
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
cacheWaitingQueriesView = &view.View{
|
||||
Measure: cacheWaitingQueries,
|
||||
Name: "open-match.dev/query/waiting_requests",
|
||||
Description: "Number of waiting requests in total",
|
||||
Aggregation: telemetry.DefaultCountDistribution,
|
||||
}
|
||||
cacheUpdateLatencyView = &view.View{
|
||||
Measure: cacheUpdateLatency,
|
||||
Name: "open-match.dev/query/update_latency",
|
||||
Description: "Time elapsed of each query cache update",
|
||||
Aggregation: telemetry.DefaultMillisecondsDistribution,
|
||||
}
|
||||
)
|
||||
|
||||
// BindService creates the query service and binds it to the serving harness.
|
||||
func BindService(p *rpc.ServerParams, cfg config.View) error {
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
service := &queryService{
|
||||
cfg: cfg,
|
||||
store: statestore.New(cfg),
|
||||
cfg: p.Config(),
|
||||
tc: newTicketCache(b, p.Config()),
|
||||
}
|
||||
|
||||
p.AddHealthCheckFunc(service.store.HealthCheck)
|
||||
|
||||
p.AddHandleFunc(func(s *grpc.Server) {
|
||||
b.AddHandleFunc(func(s *grpc.Server) {
|
||||
pb.RegisterQueryServiceServer(s, service)
|
||||
}, pb.RegisterQueryServiceHandlerFromEndpoint)
|
||||
|
||||
b.RegisterViews(
|
||||
ticketsPerQueryView,
|
||||
cacheTotalItemsView,
|
||||
cacheUpdateView,
|
||||
cacheFetchedItemsView,
|
||||
cacheWaitingQueriesView,
|
||||
cacheUpdateLatencyView,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
@ -16,14 +16,20 @@ package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
|
||||
"open-match.dev/open-match/internal/filter"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -36,49 +42,101 @@ var (
|
||||
// queryService API provides utility functions for common MMF functionality such
|
||||
// as retrieving Tickets from state storage.
|
||||
type queryService struct {
|
||||
cfg config.View
|
||||
store statestore.Service
|
||||
cfg config.View
|
||||
tc *ticketCache
|
||||
}
|
||||
|
||||
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
|
||||
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
|
||||
// QueryTickets pages the Tickets by `queryPageSize` and streams the responses back.
|
||||
// - queryPageSize defaults to 1000 if not set, and has a minimum of 10 and a maximum of 10000
|
||||
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
|
||||
ctx := responseServer.Context()
|
||||
pool := req.GetPool()
|
||||
if pool == nil {
|
||||
return status.Error(codes.InvalidArgument, ".pool is required")
|
||||
}
|
||||
|
||||
ctx := responseServer.Context()
|
||||
pSize := getPageSize(s.cfg)
|
||||
|
||||
callback := func(tickets []*pb.Ticket) error {
|
||||
err := responseServer.Send(&pb.QueryTicketsResponse{Tickets: tickets})
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Failed to send Redis response to grpc server")
|
||||
return status.Errorf(codes.Aborted, err.Error())
|
||||
}
|
||||
return nil
|
||||
pf, err := filter.NewPoolFilter(pool)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return doQueryTickets(ctx, pool, pSize, callback, s.store)
|
||||
var results []*pb.Ticket
|
||||
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
|
||||
for _, ticket := range tickets {
|
||||
if pf.In(ticket) {
|
||||
results = append(results, ticket)
|
||||
}
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Failed to run request.")
|
||||
return err
|
||||
}
|
||||
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
|
||||
|
||||
pSize := getPageSize(s.cfg)
|
||||
for start := 0; start < len(results); start += pSize {
|
||||
end := start + pSize
|
||||
if end > len(results) {
|
||||
end = len(results)
|
||||
}
|
||||
|
||||
err := responseServer.Send(&pb.QueryTicketsResponse{
|
||||
Tickets: results[start:end],
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
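For reference, the match-function side can drain this stream roughly as below. This is a sketch only (it assumes an io import and a pb.QueryServiceClient; it is not the helper shipped in pkg/matchfunction):

// Hypothetical helper that collects a QueryTickets stream into a slice.
func queryPool(ctx context.Context, qc pb.QueryServiceClient, pool *pb.Pool) ([]*pb.Ticket, error) {
	stream, err := qc.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: pool})
	if err != nil {
		return nil, err
	}
	var tickets []*pb.Ticket
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return tickets, nil
		}
		if err != nil {
			return nil, err
		}
		tickets = append(tickets, resp.GetTickets()...)
	}
}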
|
||||
|
||||
func doQueryTickets(ctx context.Context, pool *pb.Pool, pageSize int, sender func(tickets []*pb.Ticket) error, store statestore.Service) error {
|
||||
// Send requests to the storage service
|
||||
err := store.FilterTickets(ctx, pool, pageSize, sender)
|
||||
func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseServer pb.QueryService_QueryTicketIdsServer) error {
|
||||
ctx := responseServer.Context()
|
||||
pool := req.GetPool()
|
||||
if pool == nil {
|
||||
return status.Error(codes.InvalidArgument, ".pool is required")
|
||||
}
|
||||
|
||||
pf, err := filter.NewPoolFilter(pool)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Failed to retrieve result from storage service.")
|
||||
return err
|
||||
}
|
||||
|
||||
var results []string
|
||||
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
|
||||
for id, ticket := range tickets {
|
||||
if pf.In(ticket) {
|
||||
results = append(results, id)
|
||||
}
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("Failed to run request.")
|
||||
return err
|
||||
}
|
||||
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
|
||||
|
||||
pSize := getPageSize(s.cfg)
|
||||
for start := 0; start < len(results); start += pSize {
|
||||
end := start + pSize
|
||||
if end > len(results) {
|
||||
end = len(results)
|
||||
}
|
||||
|
||||
err := responseServer.Send(&pb.QueryTicketIdsResponse{
|
||||
Ids: results[start:end],
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPageSize(cfg config.View) int {
|
||||
const (
|
||||
name = "storage.page.size"
|
||||
name = "queryPageSize"
|
||||
// Minimum number of tickets to be returned in a streamed response for QueryTickets. This value
|
||||
// will be used if page size is configured lower than the minimum value.
|
||||
minPageSize int = 10
|
||||
@ -94,7 +152,7 @@ func getPageSize(cfg config.View) int {
|
||||
return defaultPageSize
|
||||
}
|
||||
|
||||
pSize := cfg.GetInt("storage.page.size")
|
||||
pSize := cfg.GetInt(name)
|
||||
if pSize < minPageSize {
|
||||
logger.Infof("page size %v is lower than the minimum limit of %v", pSize, maxPageSize)
|
||||
pSize = minPageSize
|
||||
@ -107,3 +165,159 @@ func getPageSize(cfg config.View) int {
|
||||
|
||||
return pSize
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
// ticketCache unifies concurrent requests into a single cache update, and
|
||||
// gives a safe view into that map cache.
|
||||
type ticketCache struct {
|
||||
store statestore.Service
|
||||
|
||||
requests chan *cacheRequest
|
||||
|
||||
// Single item buffered channel. Holds a value when runQuery can be safely
|
||||
// started. Basically a channel/select friendly mutex around runQuery
|
||||
// running.
|
||||
startRunRequest chan struct{}
|
||||
|
||||
wg sync.WaitGroup
|
||||
|
||||
// Multithread-unsafe fields: only written by update, and only read by
|
||||
// request after it has been given the ok.
|
||||
tickets map[string]*pb.Ticket
|
||||
err error
|
||||
}
|
||||
|
||||
func newTicketCache(b *appmain.Bindings, cfg config.View) *ticketCache {
|
||||
tc := &ticketCache{
|
||||
store: statestore.New(cfg),
|
||||
requests: make(chan *cacheRequest),
|
||||
startRunRequest: make(chan struct{}, 1),
|
||||
tickets: make(map[string]*pb.Ticket),
|
||||
}
|
||||
|
||||
tc.startRunRequest <- struct{}{}
|
||||
b.AddHealthCheckFunc(tc.store.HealthCheck)
|
||||
|
||||
return tc
|
||||
}
|
||||
|
||||
type cacheRequest struct {
|
||||
ctx context.Context
|
||||
runNow chan struct{}
|
||||
}
|
||||
|
||||
func (tc *ticketCache) request(ctx context.Context, f func(map[string]*pb.Ticket)) error {
|
||||
cr := &cacheRequest{
|
||||
ctx: ctx,
|
||||
runNow: make(chan struct{}),
|
||||
}
|
||||
|
||||
sendRequest:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "ticket cache request canceled before reuest sent.")
|
||||
case <-tc.startRunRequest:
|
||||
go tc.runRequest()
|
||||
case tc.requests <- cr:
|
||||
break sendRequest
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.Wrap(ctx.Err(), "ticket cache request canceled waiting for access.")
|
||||
case <-cr.runNow:
|
||||
defer tc.wg.Done()
|
||||
}
|
||||
|
||||
if tc.err != nil {
|
||||
return tc.err
|
||||
}
|
||||
|
||||
f(tc.tickets)
|
||||
return nil
|
||||
}
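To illustrate the contract (a hypothetical method, not in this change): callers never touch tc.tickets directly, they only read the snapshot inside the callback passed to request:

// Hypothetical example: count cached tickets using the shared snapshot.
func (tc *ticketCache) activeTicketCount(ctx context.Context) (int, error) {
	n := 0
	err := tc.request(ctx, func(tickets map[string]*pb.Ticket) {
		n = len(tickets)
	})
	return n, err
}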
|
||||
|
||||
func (tc *ticketCache) runRequest() {
|
||||
defer func() {
|
||||
tc.startRunRequest <- struct{}{}
|
||||
}()
|
||||
|
||||
// Wait for first query request.
|
||||
reqs := []*cacheRequest{<-tc.requests}
|
||||
|
||||
// Collect all waiting queries.
|
||||
collectAllWaiting:
|
||||
for {
|
||||
select {
|
||||
case req := <-tc.requests:
|
||||
reqs = append(reqs, req)
|
||||
default:
|
||||
break collectAllWaiting
|
||||
}
|
||||
}
|
||||
|
||||
tc.update()
|
||||
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
|
||||
|
||||
// Send WaitGroup to query calls, letting them run their query on the ticket
|
||||
// cache.
|
||||
for _, req := range reqs {
|
||||
tc.wg.Add(1)
|
||||
select {
|
||||
case req.runNow <- struct{}{}:
|
||||
case <-req.ctx.Done():
|
||||
tc.wg.Done()
|
||||
}
|
||||
}
|
||||
|
||||
// wait for requests to finish using ticket cache.
|
||||
tc.wg.Wait()
|
||||
}
|
||||
|
||||
func (tc *ticketCache) update() {
|
||||
st := time.Now()
|
||||
previousCount := len(tc.tickets)
|
||||
|
||||
currentAll, err := tc.store.GetIndexedIDSet(context.Background())
|
||||
if err != nil {
|
||||
tc.err = err
|
||||
return
|
||||
}
|
||||
|
||||
deletedCount := 0
|
||||
for id := range tc.tickets {
|
||||
if _, ok := currentAll[id]; !ok {
|
||||
delete(tc.tickets, id)
|
||||
deletedCount++
|
||||
}
|
||||
}
|
||||
|
||||
toFetch := []string{}
|
||||
|
||||
for id := range currentAll {
|
||||
if _, ok := tc.tickets[id]; !ok {
|
||||
toFetch = append(toFetch, id)
|
||||
}
|
||||
}
|
||||
|
||||
newTickets, err := tc.store.GetTickets(context.Background(), toFetch)
|
||||
if err != nil {
|
||||
tc.err = err
|
||||
return
|
||||
}
|
||||
|
||||
for _, t := range newTickets {
|
||||
tc.tickets[t.Id] = t
|
||||
}
|
||||
|
||||
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
|
||||
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
|
||||
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(st))/float64(time.Millisecond)))
|
||||
|
||||
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
|
||||
tc.err = nil
|
||||
}
|
||||
|
@ -15,139 +15,12 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
|
||||
internalTesting "open-match.dev/open-match/internal/testing"
|
||||
utilTesting "open-match.dev/open-match/internal/util/testing"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
|
||||
func TestDoQueryTickets(t *testing.T) {
|
||||
const (
|
||||
DoubleArg1 = "level"
|
||||
DoubleArg2 = "spd"
|
||||
)
|
||||
|
||||
var actualTickets []*pb.Ticket
|
||||
fakeErr := errors.New("some error")
|
||||
|
||||
senderGenerator := func(err error) func(tickets []*pb.Ticket) error {
|
||||
return func(tickets []*pb.Ticket) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
actualTickets = tickets
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
testTickets := internalTesting.GenerateFloatRangeTickets(
|
||||
internalTesting.Property{Name: DoubleArg1, Min: 0, Max: 20, Interval: 5},
|
||||
internalTesting.Property{Name: DoubleArg2, Min: 0, Max: 20, Interval: 5},
|
||||
)
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
sender func(tickets []*pb.Ticket) error
|
||||
pool *pb.Pool
|
||||
pageSize int
|
||||
action func(context.Context, *testing.T, statestore.Service)
|
||||
wantErr error
|
||||
wantTickets []*pb.Ticket
|
||||
}{
|
||||
{
|
||||
"expect empty response from an empty store",
|
||||
senderGenerator(nil),
|
||||
&pb.Pool{
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{
|
||||
{
|
||||
DoubleArg: DoubleArg1,
|
||||
Min: 0,
|
||||
Max: 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
100,
|
||||
func(_ context.Context, _ *testing.T, _ statestore.Service) {},
|
||||
nil,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"expect tickets with DoubleArg1 value in range of [0, 10] (inclusively)",
|
||||
senderGenerator(nil),
|
||||
&pb.Pool{
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{
|
||||
{
|
||||
DoubleArg: DoubleArg1,
|
||||
Min: 0,
|
||||
Max: 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
100,
|
||||
func(ctx context.Context, t *testing.T, store statestore.Service) {
|
||||
for _, testTicket := range testTickets {
|
||||
assert.Nil(t, store.CreateTicket(ctx, testTicket))
|
||||
assert.Nil(t, store.IndexTicket(ctx, testTicket))
|
||||
}
|
||||
},
|
||||
nil,
|
||||
internalTesting.GenerateFloatRangeTickets(
|
||||
internalTesting.Property{Name: DoubleArg1, Min: 0, Max: 10.1, Interval: 5},
|
||||
internalTesting.Property{Name: DoubleArg2, Min: 0, Max: 20, Interval: 5},
|
||||
),
|
||||
},
|
||||
{
|
||||
"expect error from canceled context",
|
||||
senderGenerator(fakeErr),
|
||||
&pb.Pool{
|
||||
DoubleRangeFilters: []*pb.DoubleRangeFilter{
|
||||
{
|
||||
DoubleArg: DoubleArg1,
|
||||
Min: 0,
|
||||
Max: 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
100,
|
||||
func(ctx context.Context, t *testing.T, store statestore.Service) {
|
||||
for _, testTicket := range testTickets {
|
||||
assert.Nil(t, store.CreateTicket(ctx, testTicket))
|
||||
assert.Nil(t, store.IndexTicket(ctx, testTicket))
|
||||
}
|
||||
},
|
||||
status.Errorf(codes.Internal, "%v", fakeErr),
|
||||
nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
cfg := viper.New()
|
||||
cfg.Set("storage.page.size", 1000)
|
||||
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
|
||||
defer closer()
|
||||
|
||||
ctx := utilTesting.NewContext(t)
|
||||
|
||||
test.action(ctx, t, store)
|
||||
assert.Equal(t, test.wantErr, doQueryTickets(ctx, test.pool, test.pageSize, test.sender, store))
|
||||
for _, wantTicket := range test.wantTickets {
|
||||
assert.Contains(t, actualTickets, wantTicket)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPageSize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
@ -162,27 +35,28 @@ func TestGetPageSize(t *testing.T) {
|
||||
{
|
||||
"set",
|
||||
func(cfg config.Mutable) {
|
||||
cfg.Set("storage.page.size", "2156")
|
||||
cfg.Set("queryPageSize", "2156")
|
||||
},
|
||||
2156,
|
||||
},
|
||||
{
|
||||
"low",
|
||||
func(cfg config.Mutable) {
|
||||
cfg.Set("storage.page.size", "9")
|
||||
cfg.Set("queryPageSize", "9")
|
||||
},
|
||||
10,
|
||||
},
|
||||
{
|
||||
"high",
|
||||
func(cfg config.Mutable) {
|
||||
cfg.Set("storage.page.size", "10001")
|
||||
cfg.Set("queryPageSize", "10001")
|
||||
},
|
||||
10000,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range testCases {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := viper.New()
|
||||
tt.configure(cfg)
|
||||
|
@ -53,8 +53,6 @@ func RunApplication() {
|
||||
|
||||
func serve(cfg config.View) {
|
||||
mux := &http.ServeMux{}
|
||||
closer := telemetry.Setup("swaggerui", mux, cfg)
|
||||
defer closer()
|
||||
port := cfg.GetInt("api.swaggerui.httpport")
|
||||
baseDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
|
@ -24,11 +24,10 @@ import (
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/omerror"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/pkg/pb"
|
||||
)
|
||||
@ -41,10 +40,10 @@ var (
|
||||
)
|
||||
|
||||
type evaluator interface {
|
||||
evaluate(context.Context, <-chan []*pb.Match) ([]string, error)
|
||||
evaluate(context.Context, <-chan []*pb.Match, chan<- string) error
|
||||
}
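To make the new streaming contract concrete, a minimal in-process implementation could look like the sketch below (illustrative only; it forwards every proposal's ID without any deduplication or scoring):

// Hypothetical evaluator: accepts every proposal it receives.
type passthroughEvaluator struct{}

func (passthroughEvaluator) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
	for proposals := range pc {
		for _, p := range proposals {
			select {
			case acceptedIds <- p.GetMatchId():
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
	return nil
}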
|
||||
|
||||
var errNoEvaluatorType = grpc.Errorf(codes.FailedPrecondition, "unable to determine evaluator type, either api.evaluator.grpcport or api.evaluator.httpport must be specified in the config")
|
||||
var errNoEvaluatorType = status.Errorf(codes.FailedPrecondition, "unable to determine evaluator type, either api.evaluator.grpcport or api.evaluator.httpport must be specified in the config")
|
||||
|
||||
func newEvaluator(cfg config.View) evaluator {
|
||||
newInstance := func(cfg config.View) (interface{}, func(), error) {
|
||||
@ -67,17 +66,17 @@ type deferredEvaluator struct {
|
||||
cacher *config.Cacher
|
||||
}
|
||||
|
||||
func (de *deferredEvaluator) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
|
||||
func (de *deferredEvaluator) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
|
||||
e, err := de.cacher.Get()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
matches, err := e.(evaluator).evaluate(ctx, pc)
|
||||
err = e.(evaluator).evaluate(ctx, pc, acceptedIds)
|
||||
if err != nil {
|
||||
de.cacher.ForceReset()
|
||||
}
|
||||
return matches, err
|
||||
return err
|
||||
}
|
||||
|
||||
type grcpEvaluatorClient struct {
|
||||
@ -88,7 +87,7 @@ func newGrpcEvaluator(cfg config.View) (evaluator, func(), error) {
|
||||
grpcAddr := fmt.Sprintf("%s:%d", cfg.GetString("api.evaluator.hostname"), cfg.GetInt64("api.evaluator.grpcport"))
|
||||
conn, err := rpc.GRPCClientFromEndpoint(cfg, grpcAddr)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Failed to create grpc evaluator client: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to create grpc evaluator client: %w", err)
|
||||
}
|
||||
|
||||
evaluatorClientLogger.WithFields(logrus.Fields{
|
||||
@ -107,21 +106,26 @@ func newGrpcEvaluator(cfg config.View) (evaluator, func(), error) {
|
||||
}, close, nil
|
||||
}
|
||||
|
||||
func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
|
||||
func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
var stream pb.Evaluator_EvaluateClient
|
||||
{ // prevent shadowing err later
|
||||
var err error
|
||||
stream, err = ec.evaluator.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error starting evaluator call: %w", err)
|
||||
return fmt.Errorf("error starting evaluator call: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
results := []string{}
|
||||
|
||||
wait := omerror.WaitOnErrors(evaluatorClientLogger, func() error {
|
||||
matchIDs := &sync.Map{}
|
||||
eg.Go(func() error {
|
||||
for proposals := range pc {
|
||||
for _, proposal := range proposals {
|
||||
|
||||
if _, ok := matchIDs.LoadOrStore(proposal.GetMatchId(), true); ok {
|
||||
return fmt.Errorf("multiple match functions used same match_id: \"%s\"", proposal.GetMatchId())
|
||||
}
|
||||
if err := stream.Send(&pb.EvaluateRequest{Match: proposal}); err != nil {
|
||||
return fmt.Errorf("failed to send request to evaluator, desc: %w", err)
|
||||
}
|
||||
@ -132,7 +136,9 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
|
||||
return fmt.Errorf("failed to close the send direction of evaluator stream, desc: %w", err)
|
||||
}
|
||||
return nil
|
||||
}, func() error {
|
||||
})
|
||||
|
||||
eg.Go(func() error {
|
||||
for {
|
||||
// TODO: add grpc timeouts for this call.
|
||||
resp, err := stream.Recv()
|
||||
@ -142,15 +148,24 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get response from evaluator client, desc: %w", err)
|
||||
}
|
||||
results = append(results, resp.GetMatchId())
|
||||
|
||||
v, ok := matchIDs.Load(resp.GetMatchId())
|
||||
if !ok {
|
||||
return fmt.Errorf("evaluator returned match_id \"%s\" which does not correspond to its any match in its input", resp.GetMatchId())
|
||||
}
|
||||
if !v.(bool) {
|
||||
return fmt.Errorf("evaluator returned same match_id twice: \"%s\"", resp.GetMatchId())
|
||||
}
|
||||
matchIDs.Store(resp.GetMatchId(), false)
|
||||
acceptedIds <- resp.GetMatchId()
|
||||
}
|
||||
})
|
||||
|
||||
err := wait()
|
||||
err := eg.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return results, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
type httpEvaluatorClient struct {
|
||||
@ -179,7 +194,7 @@ func newHTTPEvaluator(cfg config.View) (evaluator, func(), error) {
|
||||
}, close, nil
|
||||
}
|
||||
|
||||
func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
|
||||
func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
|
||||
reqr, reqw := io.Pipe()
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
@ -212,14 +227,14 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
|
||||
|
||||
req, err := http.NewRequest("POST", ec.baseURL+"/v1/evaluator/matches:evaluate", reqr)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Aborted, "failed to create evaluator http request, desc: %s", err.Error())
|
||||
return status.Errorf(codes.Aborted, "failed to create evaluator http request, desc: %s", err.Error())
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Transfer-Encoding", "chunked")
|
||||
|
||||
resp, err := ec.httpClient.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Aborted, "failed to get response from evaluator, desc: %s", err.Error())
|
||||
return status.Errorf(codes.Aborted, "failed to get response from evaluator, desc: %s", err.Error())
|
||||
}
|
||||
defer func() {
|
||||
if resp.Body.Close() != nil {
|
||||
@ -228,7 +243,6 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
var results = []string{}
|
||||
rc := make(chan error, 1)
|
||||
defer close(rc)
|
||||
go func() {
|
||||
@ -257,16 +271,16 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
|
||||
rc <- status.Errorf(codes.Unavailable, "failed to execute jsonpb.UnmarshalString(%s, &proposal): %v.", item.Result, err)
|
||||
return
|
||||
}
|
||||
results = append(results, resp.GetMatchId())
|
||||
acceptedIds <- resp.GetMatchId()
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
if len(sc) != 0 {
|
||||
return nil, <-sc
|
||||
return <-sc
|
||||
}
|
||||
if len(rc) != 0 {
|
||||
return nil, <-rc
|
||||
return <-rc
|
||||
}
|
||||
return results, nil
|
||||
return nil
|
||||
}
|
||||
|
@ -15,21 +15,52 @@
|
||||
package synchronizer
|
||||
|
||||
import (
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/ipb"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
)
|
||||
|
||||
var (
|
||||
iterationLatency = stats.Float64("open-match.dev/synchronizer/iteration_latency", "Time elapsed of each synchronizer iteration", stats.UnitMilliseconds)
|
||||
registrationWaitTime = stats.Float64("open-match.dev/synchronizer/registration_wait_time", "Time elapsed of registration wait time", stats.UnitMilliseconds)
|
||||
registrationMMFDoneTime = stats.Float64("open-match.dev/synchronizer/registration_mmf_done_time", "Time elapsed wasted in registration window with done MMFs", stats.UnitMilliseconds)
|
||||
|
||||
iterationLatencyView = &view.View{
|
||||
Measure: iterationLatency,
|
||||
Name: "open-match.dev/synchronizer/iteration_latency",
|
||||
Description: "Time elapsed of each synchronizer iteration",
|
||||
Aggregation: telemetry.DefaultMillisecondsDistribution,
|
||||
}
|
||||
registrationWaitTimeView = &view.View{
|
||||
Measure: registrationWaitTime,
|
||||
Name: "open-match.dev/synchronizer/registration_wait_time",
|
||||
Description: "Time elapsed of registration wait time",
|
||||
Aggregation: telemetry.DefaultMillisecondsDistribution,
|
||||
}
|
||||
registrationMMFDoneTimeView = &view.View{
|
||||
Measure: registrationMMFDoneTime,
|
||||
Name: "open-match.dev/synchronizer/registration_mmf_done_time",
|
||||
Description: "Time elapsed wasted in registration window with done MMFs",
|
||||
Aggregation: telemetry.DefaultMillisecondsDistribution,
|
||||
}
|
||||
)
|
||||
|
||||
// BindService creates the synchronizer service and binds it to the serving harness.
|
||||
func BindService(p *rpc.ServerParams, cfg config.View) error {
|
||||
store := statestore.New(cfg)
|
||||
service := newSynchronizerService(cfg, newEvaluator(cfg), store)
|
||||
p.AddHealthCheckFunc(store.HealthCheck)
|
||||
p.AddHandleFunc(func(s *grpc.Server) {
|
||||
func BindService(p *appmain.Params, b *appmain.Bindings) error {
|
||||
store := statestore.New(p.Config())
|
||||
service := newSynchronizerService(p.Config(), newEvaluator(p.Config()), store)
|
||||
b.AddHealthCheckFunc(store.HealthCheck)
|
||||
b.AddHandleFunc(func(s *grpc.Server) {
|
||||
ipb.RegisterSynchronizerServer(s, service)
|
||||
}, nil)
|
||||
|
||||
b.RegisterViews(
|
||||
iterationLatencyView,
|
||||
registrationWaitTimeView,
|
||||
registrationMMFDoneTimeView,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
@ -21,7 +21,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"open-match.dev/open-match/internal/appmain/contextcause"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/ipb"
|
||||
"open-match.dev/open-match/internal/statestore"
|
||||
@ -123,7 +126,11 @@ func (s *synchronizerService) Synchronize(stream ipb.Synchronizer_SynchronizeSer
|
||||
select {
|
||||
case mIDs, ok := <-m6cBuffer:
|
||||
if !ok {
|
||||
return nil
|
||||
// Prevent race: An error will result in this channel being
|
||||
// closed as part of cleanup. If it's especially fast, it may
|
||||
// beat the context done case, so be sure to return any
|
||||
// potential error.
|
||||
return registration.cycleCtx.Err()
|
||||
}
|
||||
for _, mID := range mIDs {
|
||||
err = stream.Send(&ipb.SynchronizeResponse{MatchId: mID})
|
||||
@ -181,6 +188,9 @@ func (s synchronizerService) register(ctx context.Context) *registration {
|
||||
resp: make(chan *registration),
|
||||
ctx: ctx,
|
||||
}
|
||||
|
||||
st := time.Now()
|
||||
defer stats.Record(ctx, registrationWaitTime.M(float64(time.Since(st))/float64(time.Millisecond)))
|
||||
for {
|
||||
select {
|
||||
case s.synchronizeRegistration <- req:
|
||||
@ -198,8 +208,9 @@ func (s synchronizerService) register(ctx context.Context) *registration {
|
||||
///////////////////////////////////////
|
||||
|
||||
func (s *synchronizerService) runCycle() {
|
||||
cst := time.Now()
|
||||
/////////////////////////////////////// Initialize cycle
|
||||
ctx, cancel := withCancelCause(context.Background())
|
||||
ctx, cancel := contextcause.WithCancelCause(context.Background())
|
||||
|
||||
m2c := make(chan mAndM6c)
|
||||
m3c := make(chan *pb.Match)
|
||||
@ -236,6 +247,7 @@ func (s *synchronizerService) runCycle() {
|
||||
}()
|
||||
|
||||
/////////////////////////////////////// Run Registration Period
|
||||
rst := time.Now()
|
||||
closeRegistration := time.After(s.registrationInterval())
|
||||
Registration:
|
||||
for {
|
||||
@ -268,6 +280,7 @@ Registration:
|
||||
go func() {
|
||||
allM1cSent.Wait()
|
||||
m1c.cutoff()
|
||||
stats.Record(ctx, registrationMMFDoneTime.M(float64((s.registrationInterval()-time.Since(rst))/time.Millisecond)))
|
||||
}()
|
||||
|
||||
cancelProposalCollection := time.AfterFunc(s.proposalCollectionInterval(), func() {
|
||||
@ -277,6 +290,7 @@ Registration:
|
||||
}
|
||||
})
|
||||
<-closedOnCycleEnd
|
||||
stats.Record(ctx, iterationLatency.M(float64(time.Since(cst)/time.Millisecond)))
|
||||
|
||||
// Clean up in case it was never needed.
|
||||
cancelProposalCollection.Stop()
|
||||
@ -387,13 +401,9 @@ func (c *cutoffSender) cutoff() {
|
||||
///////////////////////////////////////
|
||||
|
||||
// Calls the evaluator with the matches.
|
||||
func (s *synchronizerService) wrapEvaluator(ctx context.Context, cancel cancelErrFunc, m3c <-chan []*pb.Match, m5c chan<- string) {
|
||||
matchIDs, err := s.eval.evaluate(ctx, m3c)
|
||||
if err == nil {
|
||||
for _, mID := range matchIDs {
|
||||
m5c <- mID
|
||||
}
|
||||
} else {
|
||||
func (s *synchronizerService) wrapEvaluator(ctx context.Context, cancel contextcause.CancelErrFunc, m4c <-chan []*pb.Match, m5c chan<- string) {
|
||||
err := s.eval.evaluate(ctx, m4c, m5c)
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err,
|
||||
}).Error("error calling evaluator, canceling cycle")
|
||||
@ -428,7 +438,7 @@ func getTicketIds(tickets []*pb.Ticket) []string {
|
||||
// ignorelist. If it partially fails for whatever reason (not all tickets will
|
||||
// necessarily be in the same call), only the matches which can be safely
|
||||
// returned to the Synchronize calls are.
|
||||
func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *sync.Map, cancel cancelErrFunc, m5c <-chan []string, m6c chan<- string) {
|
||||
func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
|
||||
totalMatches := 0
|
||||
successfulMatches := 0
|
||||
var lastErr error
|
||||
@ -476,7 +486,7 @@ func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *syn
|
||||
|
||||
func (s *synchronizerService) registrationInterval() time.Duration {
|
||||
const (
|
||||
name = "synchronizer.registrationIntervalMs"
|
||||
name = "registrationInterval"
|
||||
defaultInterval = time.Second
|
||||
)
|
||||
|
||||
@ -489,7 +499,7 @@ func (s *synchronizerService) registrationInterval() time.Duration {
|
||||
|
||||
func (s *synchronizerService) proposalCollectionInterval() time.Duration {
|
||||
const (
|
||||
name = "synchronizer.proposalCollectionIntervalMs"
|
||||
name = "proposalCollectionInterval"
|
||||
defaultInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
@ -578,46 +588,3 @@ func bufferStringChannel(in chan string) chan []string {
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
///////////////////////////////////////
|
||||
///////////////////////////////////////
|
||||
|
||||
// withCancelCause returns a copy of parent with a new Done channel. The
|
||||
// returned context's Done channel is closed when the returned cancel function
|
||||
// is called or when the parent context's Done channel is closed, whichever
|
||||
// happens first. Unlike the context package's WithCancel, the cancel func takes
|
||||
// an error, and will return that error on subsequent calls to Err().
|
||||
func withCancelCause(parent context.Context) (context.Context, cancelErrFunc) {
|
||||
parent, cancel := context.WithCancel(parent)
|
||||
|
||||
ctx := &contextWithCancelCause{
|
||||
Context: parent,
|
||||
}
|
||||
|
||||
return ctx, func(err error) {
|
||||
ctx.m.Lock()
|
||||
defer ctx.m.Unlock()
|
||||
|
||||
if ctx.err == nil && parent.Err() == nil {
|
||||
ctx.err = err
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
type cancelErrFunc func(err error)
|
||||
|
||||
type contextWithCancelCause struct {
|
||||
context.Context
|
||||
m sync.Mutex
|
||||
err error
|
||||
}
|
||||
|
||||
func (ctx *contextWithCancelCause) Err() error {
|
||||
ctx.m.Lock()
|
||||
defer ctx.m.Unlock()
|
||||
if ctx.err == nil {
|
||||
return ctx.Context.Err()
|
||||
}
|
||||
return ctx.err
|
||||
}
|
||||
|
220
internal/appmain/appmain.go
Normal file
@ -0,0 +1,220 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package appmain contains the common application initialization code for Open Match servers.
|
||||
package appmain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"go.opencensus.io/stats/view"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/logging"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
"open-match.dev/open-match/internal/telemetry"
|
||||
)
|
||||
|
||||
var (
|
||||
logger = logrus.WithFields(logrus.Fields{
|
||||
"app": "openmatch",
|
||||
"component": "app.main",
|
||||
})
|
||||
)
|
||||
|
||||
// RunApplication starts and runs the given application forever. For use in
|
||||
// main functions to run the full application.
|
||||
func RunApplication(serviceName string, bindService Bind) {
|
||||
c := make(chan os.Signal, 1)
|
||||
// SIGTERM is signaled by k8s when it wants a pod to stop.
|
||||
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
|
||||
|
||||
readConfig := func() (config.View, error) {
|
||||
return config.Read()
|
||||
}
|
||||
|
||||
a, err := NewApplication(serviceName, bindService, readConfig, net.Listen)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
<-c
|
||||
err = a.Stop()
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
logger.Info("Application stopped successfully.")
|
||||
}
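A minimal sketch of how a binary wires into this entry point (illustrative main package; the frontend import matches the BindService signature shown earlier):

package main

import (
	"open-match.dev/open-match/internal/app/frontend"
	"open-match.dev/open-match/internal/appmain"
)

func main() {
	// "frontend" becomes the config prefix "api.frontend" inside NewApplication.
	appmain.RunApplication("frontend", frontend.BindService)
}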
|
||||
|
||||
// Bind is a function which starts an application, and binds it to serving.
|
||||
type Bind func(p *Params, b *Bindings) error
|
||||
|
||||
// Params are inputs to starting an application.
|
||||
type Params struct {
|
||||
config config.View
|
||||
serviceName string
|
||||
}
|
||||
|
||||
// Config provides the configuration for the application.
|
||||
func (p *Params) Config() config.View {
|
||||
return p.config
|
||||
}
|
||||
|
||||
// ServiceName is a name for the currently running binary specified by
|
||||
// RunApplication.
|
||||
func (p *Params) ServiceName() string {
|
||||
return p.serviceName
|
||||
}
|
||||
|
||||
// Bindings allows applications to bind various functions to the running servers.
|
||||
type Bindings struct {
|
||||
sp *rpc.ServerParams
|
||||
a *App
|
||||
firstErr error
|
||||
}
|
||||
|
||||
// AddHealthCheckFunc allows an application to check if it is healthy, and
|
||||
// contribute to the overall server health.
|
||||
func (b *Bindings) AddHealthCheckFunc(f func(context.Context) error) {
|
||||
b.sp.AddHealthCheckFunc(f)
|
||||
}
|
||||
|
||||
// RegisterViews begins collecting data for the given views.
|
||||
func (b *Bindings) RegisterViews(v ...*view.View) {
|
||||
if err := view.Register(v...); err != nil {
|
||||
if b.firstErr == nil {
|
||||
b.firstErr = err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
b.AddCloser(func() {
|
||||
view.Unregister(v...)
|
||||
})
|
||||
}
|
||||
|
||||
// AddHandleFunc adds a protobuf service to the grpc server which is starting.
|
||||
func (b *Bindings) AddHandleFunc(handlerFunc rpc.GrpcHandler, grpcProxyHandler rpc.GrpcProxyHandler) {
|
||||
b.sp.AddHandleFunc(handlerFunc, grpcProxyHandler)
|
||||
}
|
||||
|
||||
// TelemetryHandle adds a handler to the mux for serving debug info and metrics.
|
||||
func (b *Bindings) TelemetryHandle(pattern string, handler http.Handler) {
|
||||
b.sp.ServeMux.Handle(pattern, handler)
|
||||
}
|
||||
|
||||
// TelemetryHandleFunc adds a handlerfunc to the mux for serving debug info and metrics.
|
||||
func (b *Bindings) TelemetryHandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
|
||||
b.sp.ServeMux.HandleFunc(pattern, handler)
|
||||
}
|
||||
|
||||
// AddCloser specifies a function to be called when the application is being
|
||||
// stopped. Closers are called in reverse order.
|
||||
func (b *Bindings) AddCloser(c func()) {
|
||||
b.a.closers = append(b.a.closers, func() error {
|
||||
c()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// AddCloserErr specifies a function to be called when the application is being
|
||||
// stopped. Closers are called in reverse order. The first error returned by
|
||||
// a closer will be logged.
|
||||
func (b *Bindings) AddCloserErr(c func() error) {
|
||||
b.a.closers = append(b.a.closers, c)
|
||||
}
|
||||
|
||||
// App is used internally, and public only for apptest. Do not use, and use apptest instead.
|
||||
type App struct {
|
||||
closers []func() error
|
||||
}
|
||||
|
||||
// NewApplication is used internally, and public only for apptest. Do not use, and use apptest instead.
|
||||
func NewApplication(serviceName string, bindService Bind, getCfg func() (config.View, error), listen func(network, address string) (net.Listener, error)) (*App, error) {
|
||||
a := &App{}
|
||||
|
||||
cfg, err := getCfg()
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
}).Fatalf("cannot read configuration.")
|
||||
}
|
||||
logging.ConfigureLogging(cfg)
|
||||
sp, err := rpc.NewServerParamsFromConfig(cfg, "api."+serviceName, listen)
|
||||
if err != nil {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"error": err.Error(),
|
||||
}).Fatalf("cannot construct server.")
|
||||
}
|
||||
|
||||
p := &Params{
|
||||
config: cfg,
|
||||
serviceName: serviceName,
|
||||
}
|
||||
b := &Bindings{
|
||||
a: a,
|
||||
sp: sp,
|
||||
}
|
||||
|
||||
err = telemetry.Setup(p, b)
|
||||
if err != nil {
|
||||
suppressedErr := a.Stop() // Don't care about additional errors while stopping.
|
||||
_ = suppressedErr
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = bindService(p, b)
|
||||
if err != nil {
|
||||
suppressedErr := a.Stop() // Don't care about additional errors while stopping.
|
||||
_ = suppressedErr
|
||||
return nil, err
|
||||
}
|
||||
if b.firstErr != nil {
|
||||
suppressedErr := a.Stop() // Don't care about additional errors while stopping.
|
||||
_ = suppressedErr
|
||||
return nil, b.firstErr
|
||||
}
|
||||
|
||||
s := &rpc.Server{}
|
||||
err = s.Start(sp)
|
||||
if err != nil {
|
||||
suppressedErr := a.Stop() // Don't care about additional errors while stopping.
|
||||
_ = suppressedErr
|
||||
return nil, err
|
||||
}
|
||||
b.AddCloserErr(s.Stop)
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Stop is used internally, and public only for apptest. Do not use, and use apptest instead.
|
||||
func (a *App) Stop() error {
|
||||
// Use closers in reverse order: Since dependencies are created before
|
||||
// their dependants, this helps ensure no dependencies are closed
|
||||
// unexpectedly.
|
||||
var firstErr error
|
||||
for i := len(a.closers) - 1; i >= 0; i-- {
|
||||
err := a.closers[i]()
|
||||
if firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
return firstErr
|
||||
}
|
156
internal/appmain/apptest/apptest.go
Normal file
@ -0,0 +1,156 @@
|
||||
// Copyright 2019 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package apptest allows testing of bound services in memory.
|
||||
package apptest
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"open-match.dev/open-match/internal/appmain"
|
||||
"open-match.dev/open-match/internal/config"
|
||||
"open-match.dev/open-match/internal/rpc"
|
||||
)
|
||||
|
||||
// ServiceName is a constant used for all in memory tests.
|
||||
const ServiceName = "test"
|
||||
|
||||
// TestApp starts an application for testing. It will automatically stop after
|
||||
// the test completes, and immediately fail the test if there is an error
|
||||
// starting. The caller must provide the listeners to use for the app; this way
|
||||
// the listeners can use a random port, and set the proper values on the config.
|
||||
func TestApp(t *testing.T, cfg config.View, listeners []net.Listener, binds ...appmain.Bind) {
|
||||
ls, err := newListenerStorage(listeners)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
getCfg := func() (config.View, error) {
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
app, err := appmain.NewApplication(ServiceName, bindAll(binds), getCfg, ls.listen)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
err := app.Stop()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// RunInCluster allows for running services during an in cluster e2e test.
|
||||
// This is NOT for running the actual code under test, but instead allow running
|
||||
// auxiliary services the code under test might call.
|
||||
func RunInCluster(binds ...appmain.Bind) (func() error, error) {
|
||||
readConfig := func() (config.View, error) {
|
||||
return config.Read()
|
||||
}
|
||||
|
||||
app, err := appmain.NewApplication(ServiceName, bindAll(binds), readConfig, net.Listen)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return app.Stop, nil
|
||||
}
|
||||
|
||||
func bindAll(binds []appmain.Bind) appmain.Bind {
|
||||
return func(p *appmain.Params, b *appmain.Bindings) error {
|
||||
for _, bind := range binds {
|
||||
bindErr := bind(p, b)
|
||||
if bindErr != nil {
|
||||
return bindErr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func newFullAddr(network, address string) (fullAddr, error) {
|
||||
a := fullAddr{
|
||||
network: network,
|
||||
}
|
||||
var err error
|
||||
a.host, a.port, err = net.SplitHostPort(address)
|
||||
if err != nil {
|
||||
return fullAddr{}, err
|
||||
}
|
||||
// Usually listeners are started with an "unspecified" ip address, which has
|
||||
// several equivalent forms: ":80", "0.0.0.0:80", "[::]:80". Even if the
|
||||
// callers use the same form, the listeners may return a different form when
|
||||
// asked for its address. So detect and revert to the simpler form.
|
||||
if net.ParseIP(a.host).IsUnspecified() {
|
||||
a.host = ""
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
type fullAddr struct {
|
||||
network string
|
||||
host string
|
||||
port string
|
||||
}
|
||||
|
||||
type listenerStorage struct {
|
||||
l map[fullAddr]net.Listener
|
||||
}
|
||||
|
||||
func newListenerStorage(listeners []net.Listener) (*listenerStorage, error) {
|
||||
ls := &listenerStorage{
|
||||
l: make(map[fullAddr]net.Listener),
|
||||
}
|
||||
for _, l := range listeners {
|
||||
a, err := newFullAddr(l.Addr().Network(), l.Addr().String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ls.l[a] = l
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
func (ls *listenerStorage) listen(network, address string) (net.Listener, error) {
|
||||
a, err := newFullAddr(network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l, ok := ls.l[a]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Listener for \"%s\" was not passed to TestApp or was already used", address)
|
||||
}
|
||||
delete(ls.l, a)
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// GRPCClient creates a new client which connects to the specified service. It
|
||||
// immediately fails the test if there is an error, and will also automatically
|
||||
// close after the test completes.
|
||||
func GRPCClient(t *testing.T, cfg config.View, service string) *grpc.ClientConn {
|
||||
conn, err := rpc.GRPCClientFromConfig(cfg, service)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
err := conn.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
return conn
|
||||
}
|
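A sketch of how a test might drive this package. The config keys mirror the ones used elsewhere in this changeset ("api.<service>.grpcport" on the server side, "<service>.hostname"/"<service>.grpcport" on the client side); the no-op bind is hypothetical, and a real service may need additional telemetry or logging keys from the default config:

package exampleservice_test

import (
	"net"
	"testing"

	"github.com/spf13/viper"
	"open-match.dev/open-match/internal/appmain"
	"open-match.dev/open-match/internal/appmain/apptest"
)

func TestExampleService(t *testing.T) {
	// One listener per port the app will open; port 0 picks free ports.
	grpcL, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatal(err)
	}
	httpL, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatal(err)
	}
	_, grpcPort, _ := net.SplitHostPort(grpcL.Addr().String())
	_, httpPort, _ := net.SplitHostPort(httpL.Addr().String())

	cfg := viper.New()
	cfg.Set("api.test.grpcport", grpcPort) // where the in-memory app serves
	cfg.Set("api.test.httpport", httpPort)
	cfg.Set("test.hostname", "localhost") // where the test client connects
	cfg.Set("test.grpcport", grpcPort)

	// Hypothetical no-op bind; a real test would pass the service's Bind.
	bind := func(p *appmain.Params, b *appmain.Bindings) error { return nil }
	apptest.TestApp(t, cfg, []net.Listener{grpcL, httpL}, bind)

	conn := apptest.GRPCClient(t, cfg, "test")
	_ = conn // issue RPCs against the in-memory service here
}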
internal/appmain/contextcause/contextcause.go (new file, 62 lines)
@@ -0,0 +1,62 @@

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

package contextcause

import (
	"context"
	"sync"
)

// WithCancelCause returns a copy of parent with a new Done channel. The
// returned context's Done channel is closed when the returned cancel function
// is called or when the parent context's Done channel is closed, whichever
// happens first. Unlike the conext package's WithCancel, the cancel func takes
// an error, and will return that error on subsequent calls to Err().
func WithCancelCause(parent context.Context) (context.Context, CancelErrFunc) {
	parent, cancel := context.WithCancel(parent)

	ctx := &contextWithCancelCause{
		Context: parent,
	}

	return ctx, func(err error) {
		ctx.m.Lock()
		defer ctx.m.Unlock()

		if ctx.err == nil && parent.Err() == nil {
			ctx.err = err
		}
		cancel()
	}
}

// CancelErrFunc cancels a context simular to context.CancelFunc. However it
// indicates why the context was canceled with the provided error.
type CancelErrFunc func(err error)

type contextWithCancelCause struct {
	context.Context
	m   sync.Mutex
	err error
}

func (ctx *contextWithCancelCause) Err() error {
	ctx.m.Lock()
	defer ctx.m.Unlock()
	if ctx.err == nil {
		return ctx.Context.Err()
	}
	return ctx.err
}
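A small sketch of the intended call pattern, consistent with the tests that follow; the error value is illustrative:

import (
	"context"
	"errors"

	"open-match.dev/open-match/internal/appmain/contextcause"
)

func example(parent context.Context) {
	ctx, cancel := contextcause.WithCancelCause(parent)

	// Later, cancel with a reason instead of a bare context.Canceled.
	cancel(errors.New("upstream evaluator failed"))

	<-ctx.Done()
	// ctx.Err() now reports the cause passed to cancel, unless the parent
	// context was already canceled first.
	_ = ctx.Err()
}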
internal/appmain/contextcause/contextcause_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

package contextcause

import (
	"context"
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"
)

var errExample = errors.New("errExample")

func TestCauseOverride(t *testing.T) {
	parent, cancelParent := context.WithCancel(context.Background())
	ctx, cancel := WithCancelCause(parent)

	select {
	case <-ctx.Done():
		t.FailNow()
	default:
	}

	cancel(errExample)
	<-ctx.Done()
	require.Equal(t, errExample, ctx.Err())

	cancel(errors.New("second error"))
	require.Equal(t, errExample, ctx.Err())

	cancelParent()
	require.Equal(t, errExample, ctx.Err())
}

func TestParentCanceledFirst(t *testing.T) {
	parent, cancelParent := context.WithCancel(context.Background())
	ctx, cancel := WithCancelCause(parent)

	select {
	case <-ctx.Done():
		t.FailNow()
	default:
	}

	cancelParent()
	<-ctx.Done()
	require.Equal(t, context.Canceled, ctx.Err())

	cancel(errExample)
	require.Equal(t, context.Canceled, ctx.Err())
}
@@ -133,8 +133,10 @@ var getTests = []struct {
	},
}

//nolint: gocritic, staticcheck
func Test_Get(t *testing.T) {
	for _, tt := range getTests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			if tt.verifySame == nil {
				tt.verifySame = func(a, b interface{}) bool {
@@ -17,13 +17,14 @@ package config

import (
	"fmt"
	"log"

	"github.com/fsnotify/fsnotify"
	"github.com/spf13/viper"
	"log"
)

// Read sets default to a viper instance and read user config to override these defaults.
func Read() (View, error) {
func Read() (*viper.Viper, error) {
	var err error
	// read configs from config/default/matchmaker_config_default.yaml
	// matchmaker_config_default provides default values for all of the possible tunnable parameters in Open Match
internal/filter/filter.go (new file, 136 lines)
@@ -0,0 +1,136 @@

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

// Package filter defines which tickets pass which filters. Other implementations which help
// filter tickets (eg, a range index lookup) must conform to the same set of tickets being within
// the filter as here.
package filter

import (
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"open-match.dev/open-match/pkg/pb"
)

var emptySearchFields = &pb.SearchFields{}

var (
	logger = logrus.WithFields(logrus.Fields{
		"app":       "openmatch",
		"component": "filter",
	})
)

// PoolFilter contains all the filtering criteria from a Pool that the Ticket
// needs to meet to belong to that Pool.
type PoolFilter struct {
	DoubleRangeFilters  []*pb.DoubleRangeFilter
	StringEqualsFilters []*pb.StringEqualsFilter
	TagPresentFilters   []*pb.TagPresentFilter
	CreatedBefore       time.Time
	CreatedAfter        time.Time
}

// NewPoolFilter validates a Pool's filtering criteria and returns a PoolFilter.
func NewPoolFilter(pool *pb.Pool) (*PoolFilter, error) {
	var ca, cb time.Time
	var err error

	if pool.GetCreatedBefore() != nil {
		if cb, err = ptypes.Timestamp(pool.GetCreatedBefore()); err != nil {
			return nil, status.Error(codes.InvalidArgument, ".invalid created_before value")
		}
	}

	if pool.GetCreatedAfter() != nil {
		if ca, err = ptypes.Timestamp(pool.GetCreatedAfter()); err != nil {
			return nil, status.Error(codes.InvalidArgument, ".invalid created_after value")
		}
	}

	return &PoolFilter{
		DoubleRangeFilters:  pool.GetDoubleRangeFilters(),
		StringEqualsFilters: pool.GetStringEqualsFilters(),
		TagPresentFilters:   pool.GetTagPresentFilters(),
		CreatedBefore:       cb,
		CreatedAfter:        ca,
	}, nil
}

// In returns true if the Ticket meets all the criteria for this PoolFilter.
func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
	s := ticket.GetSearchFields()
	if s == nil {
		s = emptySearchFields
	}

	if !pf.CreatedAfter.IsZero() || !pf.CreatedBefore.IsZero() {
		// CreateTime is only populated by Open Match and hence expected to be valid.
		if ct, err := ptypes.Timestamp(ticket.CreateTime); err == nil {
			if !pf.CreatedAfter.IsZero() {
				if !ct.After(pf.CreatedAfter) {
					return false
				}
			}

			if !pf.CreatedBefore.IsZero() {
				if !ct.Before(pf.CreatedBefore) {
					return false
				}
			}
		} else {
			logger.WithFields(logrus.Fields{
				"error": err.Error(),
				"id":    ticket.GetId(),
			}).Error("failed to get time from Timestamp proto")
		}
	}

	for _, f := range pf.DoubleRangeFilters {
		v, ok := s.DoubleArgs[f.DoubleArg]
		if !ok {
			return false
		}
		// Not simplified so that NaN cases are handled correctly.
		if !(v >= f.Min && v <= f.Max) {
			return false
		}
	}

	for _, f := range pf.StringEqualsFilters {
		v, ok := s.StringArgs[f.StringArg]
		if !ok {
			return false
		}
		if f.Value != v {
			return false
		}
	}

outer:
	for _, f := range pf.TagPresentFilters {
		for _, v := range s.Tags {
			if v == f.Tag {
				continue outer
			}
		}
		return false
	}

	return true
}
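A brief sketch of how a caller might use this package, with illustrative field names; the query service's actual wiring is not shown in this diff:

import (
	"log"

	"open-match.dev/open-match/internal/filter"
	"open-match.dev/open-match/pkg/pb"
)

func poolContains() bool {
	pool := &pb.Pool{
		StringEqualsFilters: []*pb.StringEqualsFilter{
			{StringArg: "mode", Value: "ranked"}, // illustrative argument name
		},
	}
	pf, err := filter.NewPoolFilter(pool)
	if err != nil {
		log.Fatal(err) // invalid created_before/created_after values surface here
	}

	ticket := &pb.Ticket{
		SearchFields: &pb.SearchFields{
			StringArgs: map[string]string{"mode": "ranked"},
		},
	}
	return pf.In(ticket)
}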
internal/filter/filter_test.go (new file, 92 lines)
@@ -0,0 +1,92 @@

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

package filter

import (
	"testing"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"open-match.dev/open-match/internal/filter/testcases"
	"open-match.dev/open-match/pkg/pb"
)

func TestMeetsCriteria(t *testing.T) {
	for _, tc := range testcases.IncludedTestCases() {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			pf, err := NewPoolFilter(tc.Pool)
			if err != nil {
				t.Error("pool should be valid")
			}
			tc.Ticket.CreateTime = ptypes.TimestampNow()
			if !pf.In(tc.Ticket) {
				t.Error("ticket should be included in the pool")
			}
		})
	}

	for _, tc := range testcases.ExcludedTestCases() {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			pf, err := NewPoolFilter(tc.Pool)
			if err != nil {
				t.Error("pool should be valid")
			}
			tc.Ticket.CreateTime = ptypes.TimestampNow()
			if pf.In(tc.Ticket) {
				t.Error("ticket should be excluded from the pool")
			}
		})
	}
}

func TestValidPoolFilter(t *testing.T) {
	for _, tc := range []struct {
		name string
		pool *pb.Pool
		code codes.Code
		msg  string
	}{
		{
			"invalid create before",
			&pb.Pool{
				CreatedBefore: &timestamp.Timestamp{Nanos: -1},
			},
			codes.InvalidArgument,
			".invalid created_before value",
		},
		{
			"invalid create after",
			&pb.Pool{
				CreatedAfter: &timestamp.Timestamp{Nanos: -1},
			},
			codes.InvalidArgument,
			".invalid created_after value",
		},
	} {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			pf, err := NewPoolFilter(tc.pool)
			assert.Nil(t, pf)
			s := status.Convert(err)
			assert.Equal(t, tc.code, s.Code())
			assert.Equal(t, tc.msg, s.Message())
		})
	}
}
internal/filter/testcases/testcases.go (new file, 413 lines)
@@ -0,0 +1,413 @@

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

// Package testcases contains lists of ticket filtering test cases.
package testcases

import (
	"fmt"
	"math"
	"time"

	"github.com/golang/protobuf/ptypes"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
	"open-match.dev/open-match/pkg/pb"
)

// TestCase defines a single filtering test case to run.
type TestCase struct {
	Name   string
	Ticket *pb.Ticket
	Pool   *pb.Pool
}

// IncludedTestCases returns a list of test cases where using the given filter,
// the ticket is included in the result.
func IncludedTestCases() []TestCase {
	now := time.Now()
	return []TestCase{
		{
			"no filters or fields",
			&pb.Ticket{},
			&pb.Pool{},
		},

		simpleDoubleRange("simpleInRange", 5, 0, 10),
		simpleDoubleRange("exactMatch", 5, 5, 5),
		simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1)),
		simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0),

		{
			"String equals simple positive",
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					StringArgs: map[string]string{
						"field": "value",
					},
				},
			},
			&pb.Pool{
				StringEqualsFilters: []*pb.StringEqualsFilter{
					{
						StringArg: "field",
						Value:     "value",
					},
				},
			},
		},

		{
			"TagPresent simple positive",
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					Tags: []string{
						"mytag",
					},
				},
			},
			&pb.Pool{
				TagPresentFilters: []*pb.TagPresentFilter{
					{
						Tag: "mytag",
					},
				},
			},
		},

		{
			"TagPresent multiple all present",
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					Tags: []string{
						"A", "B", "C",
					},
				},
			},
			&pb.Pool{
				TagPresentFilters: []*pb.TagPresentFilter{
					{
						Tag: "A",
					},
					{
						Tag: "C",
					},
					{
						Tag: "B",
					},
				},
			},
		},

		multipleFilters(true, true, true),

		{
			"CreatedBefore simple positive",
			&pb.Ticket{},
			&pb.Pool{
				CreatedBefore: timestamp(now.Add(time.Hour * 1)),
			},
		},
		{
			"CreatedAfter simple positive",
			&pb.Ticket{},
			&pb.Pool{
				CreatedAfter: timestamp(now.Add(time.Hour * -1)),
			},
		},
		{
			"Between CreatedBefore and CreatedAfter positive",
			&pb.Ticket{},
			&pb.Pool{
				CreatedBefore: timestamp(now.Add(time.Hour * 1)),
				CreatedAfter:  timestamp(now.Add(time.Hour * -1)),
			},
		},
		{
			"No time search criteria positive",
			&pb.Ticket{},
			&pb.Pool{},
		},
	}
}

// ExcludedTestCases returns a list of test cases where using the given filter,
// the ticket is NOT included in the result.
func ExcludedTestCases() []TestCase {
	now := time.Now()
	return []TestCase{
		{
			"DoubleRange no SearchFields",
			&pb.Ticket{},
			&pb.Pool{
				DoubleRangeFilters: []*pb.DoubleRangeFilter{
					{
						DoubleArg: "field",
						Min:       math.Inf(-1),
						Max:       math.Inf(1),
					},
				},
			},
		},
		{
			"StringEquals no SearchFields",
			&pb.Ticket{},
			&pb.Pool{
				StringEqualsFilters: []*pb.StringEqualsFilter{
					{
						StringArg: "field",
						Value:     "value",
					},
				},
			},
		},
		{
			"TagPresent no SearchFields",
			&pb.Ticket{},
			&pb.Pool{
				TagPresentFilters: []*pb.TagPresentFilter{
					{
						Tag: "value",
					},
				},
			},
		},

		{
			"double range missing field",
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					DoubleArgs: map[string]float64{
						"otherfield": 0,
					},
				},
			},
			&pb.Pool{
				DoubleRangeFilters: []*pb.DoubleRangeFilter{
					{
						DoubleArg: "field",
						Min:       math.Inf(-1),
						Max:       math.Inf(1),
					},
				},
			},
		},

		simpleDoubleRange("valueTooLow", -1, 0, 10),
		simpleDoubleRange("valueTooHigh", 11, 0, 10),
		simpleDoubleRange("minIsNan", 5, math.NaN(), 10),
		simpleDoubleRange("maxIsNan", 5, 0, math.NaN()),
		simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN()),
		simpleDoubleRange("valueIsNan", math.NaN(), 0, 10),
		simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1)),
		simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN()),

		{
			"String equals simple negative", // and case sensitivity
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					StringArgs: map[string]string{
						"field": "value",
					},
				},
			},
			&pb.Pool{
				StringEqualsFilters: []*pb.StringEqualsFilter{
					{
						StringArg: "field",
						Value:     "VALUE",
					},
				},
			},
		},

		{
			"String equals missing field",
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					StringArgs: map[string]string{
						"otherfield": "othervalue",
					},
				},
			},
			&pb.Pool{
				StringEqualsFilters: []*pb.StringEqualsFilter{
					{
						StringArg: "field",
						Value:     "value",
					},
				},
			},
		},

		{
			"TagPresent simple negative", // and case sensitivity
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					Tags: []string{
						"MYTAG",
					},
				},
			},
			&pb.Pool{
				TagPresentFilters: []*pb.TagPresentFilter{
					{
						Tag: "mytag",
					},
				},
			},
		},

		{
			"TagPresent multiple with one missing",
			&pb.Ticket{
				SearchFields: &pb.SearchFields{
					Tags: []string{
						"A", "B", "C",
					},
				},
			},
			&pb.Pool{
				TagPresentFilters: []*pb.TagPresentFilter{
					{
						Tag: "A",
					},
					{
						Tag: "D",
					},
					{
						Tag: "C",
					},
				},
			},
		},

		{
			"CreatedBefore simple negative",
			&pb.Ticket{},
			&pb.Pool{
				CreatedBefore: timestamp(now.Add(time.Hour * -1)),
			},
		},
		{
			"CreatedAfter simple negative",
			&pb.Ticket{},
			&pb.Pool{
				CreatedAfter: timestamp(now.Add(time.Hour * 1)),
			},
		},
		{
			"Created before time range negative",
			&pb.Ticket{},
			&pb.Pool{
				CreatedBefore: timestamp(now.Add(time.Hour * 2)),
				CreatedAfter:  timestamp(now.Add(time.Hour * 1)),
			},
		},
		{
			"Created after time range negative",
			&pb.Ticket{},
			&pb.Pool{
				CreatedBefore: timestamp(now.Add(time.Hour * -1)),
				CreatedAfter:  timestamp(now.Add(time.Hour * -2)),
			},
		},

		multipleFilters(false, true, true),
		multipleFilters(true, false, true),
		multipleFilters(true, true, false),
	}
}

func simpleDoubleRange(name string, value, min, max float64) TestCase {
	return TestCase{
		"double range " + name,
		&pb.Ticket{
			SearchFields: &pb.SearchFields{
				DoubleArgs: map[string]float64{
					"field": value,
				},
			},
		},
		&pb.Pool{
			DoubleRangeFilters: []*pb.DoubleRangeFilter{
				{
					DoubleArg: "field",
					Min:       min,
					Max:       max,
				},
			},
		},
	}
}

func multipleFilters(doubleRange, stringEquals, tagPresent bool) TestCase {
	a := float64(0)
	if !doubleRange {
		a = 10
	}

	b := "hi"
	if !stringEquals {
		b = "bye"
	}

	c := "yo"
	if !tagPresent {
		c = "cya"
	}

	return TestCase{
		fmt.Sprintf("multiplefilters: %v, %v, %v", doubleRange, stringEquals, tagPresent),
		&pb.Ticket{
			SearchFields: &pb.SearchFields{
				DoubleArgs: map[string]float64{
					"a": a,
				},
				StringArgs: map[string]string{
					"b": b,
				},
				Tags: []string{c},
			},
		},
		&pb.Pool{
			DoubleRangeFilters: []*pb.DoubleRangeFilter{
				{
					DoubleArg: "a",
					Min:       -1,
					Max:       1,
				},
			},
			StringEqualsFilters: []*pb.StringEqualsFilter{
				{
					StringArg: "b",
					Value:     "hi",
				},
			},
			TagPresentFilters: []*pb.TagPresentFilter{
				{
					Tag: "yo",
				},
			},
		},
	}
}

func timestamp(t time.Time) *tspb.Timestamp {
	tsp, err := ptypes.TimestampProto(t)
	if err != nil {
		panic(err)
	}

	return tsp
}
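Because these cases are exported, other ticket-filtering implementations in the tree can assert the same inclusion semantics that filter.go's package comment calls for; a hypothetical consumer might look like this:

package myindex_test

import (
	"testing"

	"open-match.dev/open-match/internal/filter/testcases"
)

func TestIndexMatchesFilterPackage(t *testing.T) {
	for _, tc := range testcases.IncludedTestCases() {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			// Hypothetical: index tc.Ticket, query with tc.Pool, and require
			// that the ticket comes back, mirroring TestMeetsCriteria above.
		})
	}
}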
@@ -1,70 +0,0 @@ (file deleted)

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

package omerror

import (
	"context"

	"github.com/sirupsen/logrus"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ProtoFromErr converts an error into a grpc status. It differs from
// google.golang.org/grpc/status in that it will return an OK code on nil, and
// returns the proper codes for context cancelation and deadline exceeded.
func ProtoFromErr(err error) *spb.Status {
	switch err {
	case nil:
		return &spb.Status{Code: int32(codes.OK)}
	case context.DeadlineExceeded:
		fallthrough
	case context.Canceled:
		return status.FromContextError(err).Proto()
	default:
		return status.Convert(err).Proto()
	}
}

// WaitFunc will wait until all called functions return. WaitFunc returns the
// first error returned, otherwise it returns nil.
type WaitFunc func() error

// WaitOnErrors immediately starts a new go routine for each function passed it.
// It returns a WaitFunc. Any additional errors not returned are instead logged.
func WaitOnErrors(logger *logrus.Entry, fs ...func() error) WaitFunc {
	errors := make(chan error, len(fs))
	for _, f := range fs {
		go func(f func() error) {
			errors <- f()
		}(f)
	}

	return func() error {
		var first error
		for range fs {
			err := <-errors
			if first == nil {
				first = err
			} else {
				if err != nil {
					logger.WithError(err).Warning("Multiple errors occurred in parallel execution. This error is suppressed by the error returned.")
				}
			}
		}
		return first
	}
}
@@ -1,123 +0,0 @@ (file deleted)

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

package omerror

import (
	"context"
	"fmt"
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
	"github.com/stretchr/testify/require"
	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func TestProtoFromErr(t *testing.T) {
	tests := []struct {
		err  error
		want *spb.Status
	}{
		{
			nil,
			&spb.Status{Code: int32(codes.OK)},
		},
		{
			context.Canceled,
			&spb.Status{Code: int32(codes.Canceled), Message: "context canceled"},
		},
		{
			context.DeadlineExceeded,
			&spb.Status{Code: int32(codes.DeadlineExceeded), Message: "context deadline exceeded"},
		},
		{
			fmt.Errorf("monkeys with no hats"),
			&spb.Status{Code: int32(codes.Unknown), Message: "monkeys with no hats"},
		},
		{
			status.Errorf(codes.Internal, "even the lemurs have no hats"),
			&spb.Status{Code: int32(codes.Internal), Message: "even the lemurs have no hats"},
		},
	}

	for _, tc := range tests {
		require.Equal(t, tc.want, ProtoFromErr(tc.err))
	}
}

func TestWaitOnErrors(t *testing.T) {
	errA := fmt.Errorf("the fish have the hats")
	errB := fmt.Errorf("who gave the fish hats")

	tests := []struct {
		err    error
		fs     []func() error
		logged bool
		log    string
	}{
		{
			nil, []func() error{}, false, "",
		},
		{
			errA,
			[]func() error{
				func() error {
					return errA
				},
			},
			false, "",
		},
		{
			nil,
			[]func() error{
				func() error {
					return nil
				},
			},
			false, "",
		},
		{
			errB,
			[]func() error{
				func() error {
					return errB
				},
				func() error {
					return errB
				},
			},
			true, "Multiple errors occurred in parallel execution. This error is suppressed by the error returned.",
		},
	}

	for _, tc := range tests {
		logger, hook := test.NewNullLogger()
		wait := WaitOnErrors(logrus.NewEntry(logger), tc.fs...)

		require.Equal(t, tc.err, wait())

		if tc.logged {
			require.Equal(t, 1, len(hook.Entries))
			require.Equal(t, logrus.WarnLevel, hook.LastEntry().Level)
			require.Equal(t, tc.log, hook.LastEntry().Message)
		} else {
			require.Nil(t, hook.LastEntry())
		}
	}

	_ = errB /////////////////////////////////////////////////////////////////////////////
}
@@ -15,10 +15,11 @@
package rpc

import (
	"google.golang.org/grpc"
	"net/http"
	"open-match.dev/open-match/internal/config"
	"sync"

	"google.golang.org/grpc"
	"open-match.dev/open-match/internal/config"
)

// ClientCache holds GRPC and HTTP clients based on an address.
@@ -15,9 +15,10 @@
package rpc

import (
	"testing"

	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"testing"
)

const (
@@ -36,7 +36,6 @@ import (
	"github.com/sirupsen/logrus"
	"go.opencensus.io/plugin/ochttp"
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/resolver"
@@ -311,7 +310,7 @@ func newGRPCDialOptions(enableMetrics bool, enableRPCLogging bool, enableRPCPayl
	opts := []grpc.DialOption{
		grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(si...)),
		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(ui...)),
		grpc.WithBalancerName(roundrobin.Name),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    20 * time.Second,
			Timeout: 10 * time.Second,
@@ -16,19 +16,21 @@ package rpc

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"testing"

	"github.com/spf13/viper"
	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc"
	"io/ioutil"
	"net/http"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/telemetry"
	shellTesting "open-match.dev/open-match/internal/testing"
	utilTesting "open-match.dev/open-match/internal/util/testing"
	"open-match.dev/open-match/pkg/pb"
	certgenTesting "open-match.dev/open-match/tools/certgen/testing"
	"os"
	"testing"
)

func TestSecureGRPCFromConfig(t *testing.T) {
@@ -103,9 +105,8 @@ func runGrpcClientTests(t *testing.T, assert *assert.Assertions, cfg config.View

	s := &Server{}
	defer s.Stop()
	waitForStart, err := s.Start(rpcParams)
	err := s.Start(rpcParams)
	assert.Nil(err)
	waitForStart()

	// Acquire grpc client
	grpcConn, err := GRPCClientFromConfig(cfg, "test")
@@ -128,9 +129,8 @@ func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *S
	}, pb.RegisterFrontendServiceHandlerFromEndpoint)
	s := &Server{}
	defer s.Stop()
	waitForStart, err := s.Start(rpcParams)
	err := s.Start(rpcParams)
	assert.Nil(err)
	waitForStart()

	// Acquire http client
	httpClient, baseURL, err := HTTPClientFromConfig(cfg, "test")
@@ -159,15 +159,15 @@ func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *S
// Generate a config view and optional TLS key manifests (optional) for testing
func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool) (config.View, *ServerParams, func()) {
	// Create netlisteners on random ports used for rpc serving
	grpcLh := MustListen()
	httpLh := MustListen()
	rpcParams := NewServerParamsFromListeners(grpcLh, httpLh)
	grpcL := MustListen()
	httpL := MustListen()
	rpcParams := NewServerParamsFromListeners(grpcL, httpL)

	// Generate a config view with paths to the manifests
	cfg := viper.New()
	cfg.Set("test.hostname", "localhost")
	cfg.Set("test.grpcport", grpcLh.Number())
	cfg.Set("test.httpport", httpLh.Number())
	cfg.Set("test.grpcport", MustGetPortNumber(grpcL))
	cfg.Set("test.httpport", MustGetPortNumber(httpL))

	// Create temporary TLS key files for testing
	pubFile, err := ioutil.TempFile("", "pub*")
@@ -176,8 +176,8 @@ func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool
	if tlsEnabled {
		// Generate public and private key bytes
		pubBytes, priBytes, err := certgenTesting.CreateCertificateAndPrivateKeyForTesting([]string{
			fmt.Sprintf("localhost:%d", grpcLh.Number()),
			fmt.Sprintf("localhost:%d", httpLh.Number()),
			fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL)),
			fmt.Sprintf("localhost:%s", MustGetPortNumber(httpL)),
		})
		assert.Nil(err)

@@ -194,6 +194,22 @@ func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool
	return cfg, rpcParams, func() { removeTempFile(assert, pubFile.Name()) }
}

func MustListen() net.Listener {
	l, err := net.Listen("tcp", ":0")
	if err != nil {
		panic(err)
	}
	return l
}

func MustGetPortNumber(l net.Listener) string {
	_, port, err := net.SplitHostPort(l.Addr().String())
	if err != nil {
		panic(err)
	}
	return port
}

func removeTempFile(assert *assert.Assertions, paths ...string) {
	for _, path := range paths {
		err := os.Remove(path)
@@ -16,10 +16,8 @@ package rpc

import (
	"context"
	"net/http"
	"sync"

	"net"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/pkg/errors"
@@ -28,40 +26,28 @@ import (
)

type insecureServer struct {
	grpcLh       *ListenerHolder
	grpcListener net.Listener
	grpcServer   *grpc.Server

	httpLh       *ListenerHolder
	httpListener net.Listener
	httpMux      *http.ServeMux
	proxyMux     *runtime.ServeMux
	httpServer   *http.Server
}

func (s *insecureServer) start(params *ServerParams) (func(), error) {
	var serverStartWaiter sync.WaitGroup

func (s *insecureServer) start(params *ServerParams) error {
	s.httpMux = params.ServeMux
	s.proxyMux = runtime.NewServeMux()

	// Configure the gRPC server.
	grpcListener, err := s.grpcLh.Obtain()
	if err != nil {
		return func() {}, errors.WithStack(err)
	}
	s.grpcListener = grpcListener

	s.grpcServer = grpc.NewServer(newGRPCServerOptions(params)...)
	// Bind gRPC handlers
	for _, handlerFunc := range params.handlersForGrpc {
		handlerFunc(s.grpcServer)
	}

	serverStartWaiter.Add(1)
	go func() {
		serverStartWaiter.Done()
		serverLogger.Infof("Serving gRPC: %s", s.grpcLh.AddrString())
		serverLogger.Infof("Serving gRPC: %s", s.grpcListener.Addr().String())
		gErr := s.grpcServer.Serve(s.grpcListener)
		if gErr != nil {
			return
@@ -69,21 +55,15 @@ func (s *insecureServer) start(params *ServerParams) (func(), error) {
	}()

	// Configure the HTTP proxy server.
	httpListener, err := s.httpLh.Obtain()
	if err != nil {
		return func() {}, errors.WithStack(err)
	}
	s.httpListener = httpListener

	// Bind gRPC handlers
	ctx, cancel := context.WithCancel(context.Background())

	for _, handlerFunc := range params.handlersForGrpcProxy {
		dialOpts := newGRPCDialOptions(params.enableMetrics, params.enableRPCLogging, params.enableRPCPayloadLogging)
		dialOpts = append(dialOpts, grpc.WithInsecure())
		if err = handlerFunc(ctx, s.proxyMux, grpcListener.Addr().String(), dialOpts); err != nil {
		if err := handlerFunc(ctx, s.proxyMux, s.grpcListener.Addr().String(), dialOpts); err != nil {
			cancel()
			return func() {}, errors.WithStack(err)
			return errors.WithStack(err)
		}
	}

@@ -93,38 +73,28 @@ func (s *insecureServer) start(params *ServerParams) (func(), error) {
		Addr:    s.httpListener.Addr().String(),
		Handler: instrumentHTTPHandler(s.httpMux, params),
	}
	serverStartWaiter.Add(1)
	go func() {
		serverStartWaiter.Done()
		serverLogger.Infof("Serving HTTP: %s", s.httpLh.AddrString())
		serverLogger.Infof("Serving HTTP: %s", s.httpListener.Addr().String())
		hErr := s.httpServer.Serve(s.httpListener)
		defer cancel()
		if hErr != nil {
			serverLogger.Debugf("error closing gRPC server: %s", hErr)
		if hErr != nil && hErr != http.ErrServerClosed {
			serverLogger.Debugf("error serving HTTP: %s", hErr)
		}
	}()

	return serverStartWaiter.Wait, nil
	return nil
}

func (s *insecureServer) stop() {
	s.grpcServer.Stop()
	if err := s.grpcListener.Close(); err != nil {
		serverLogger.Debugf("error closing gRPC listener: %s", err)
	}

	if err := s.httpServer.Close(); err != nil {
		serverLogger.Debugf("error closing HTTP server: %s", err)
	}

	if err := s.httpListener.Close(); err != nil {
		serverLogger.Debugf("error closing HTTP listener: %s", err)
	}
func (s *insecureServer) stop() error {
	// the servers also close their respective listeners.
	err := s.httpServer.Shutdown(context.Background())
	s.grpcServer.GracefulStop()
	return err
}

func newInsecureServer(grpcLh *ListenerHolder, httpLh *ListenerHolder) *insecureServer {
func newInsecureServer(grpcL, httpL net.Listener) *insecureServer {
	return &insecureServer{
		grpcLh: grpcLh,
		httpLh: httpLh,
		grpcListener: grpcL,
		httpListener: httpL,
	}
}
@@ -29,25 +29,24 @@ import (

func TestInsecureStartStop(t *testing.T) {
	assert := assert.New(t)
	grpcLh := MustListen()
	httpLh := MustListen()
	grpcL := MustListen()
	httpL := MustListen()
	ff := &shellTesting.FakeFrontend{}

	params := NewServerParamsFromListeners(grpcLh, httpLh)
	params := NewServerParamsFromListeners(grpcL, httpL)
	params.AddHandleFunc(func(s *grpc.Server) {
		pb.RegisterFrontendServiceServer(s, ff)
	}, pb.RegisterFrontendServiceHandlerFromEndpoint)
	s := newInsecureServer(grpcLh, httpLh)
	s := newInsecureServer(grpcL, httpL)
	defer s.stop()
	waitForStart, err := s.start(params)
	err := s.start(params)
	assert.Nil(err)
	waitForStart()

	conn, err := grpc.Dial(fmt.Sprintf(":%d", grpcLh.Number()), grpc.WithInsecure())
	conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
	assert.Nil(err)
	defer conn.Close()

	endpoint := fmt.Sprintf("http://localhost:%d", httpLh.Number())
	endpoint := fmt.Sprintf("http://localhost:%s", MustGetPortNumber(httpL))
	httpClient := &http.Client{
		Timeout: time.Second,
	}
@@ -1,106 +0,0 @@ (file deleted)

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

package rpc

import (
	"fmt"
	"net"

	"sync"

	"github.com/pkg/errors"
)

// ListenerHolder holds an opened port that can only be handed off to 1 go routine.
type ListenerHolder struct {
	number   int
	listener net.Listener
	addr     string
	sync.RWMutex
}

// Obtain returns the TCP listener. This method can only be called once and is thread-safe.
func (lh *ListenerHolder) Obtain() (net.Listener, error) {
	lh.Lock()
	defer lh.Unlock()
	listener := lh.listener
	lh.listener = nil
	if listener == nil {
		return nil, errors.WithStack(fmt.Errorf("cannot Obtain() listener for %d because already handed off", lh.number))
	}
	return listener, nil
}

// Number returns the port number.
func (lh *ListenerHolder) Number() int {
	return lh.number
}

// AddrString returns the address of the serving port.
// Use this over fmt.Sprintf(":%d", lh.Number()) because the address is represented differently in
// systems that prefer IPv4 and IPv6.
func (lh *ListenerHolder) AddrString() string {
	return lh.addr
}

// Close shutsdown the TCP listener.
func (lh *ListenerHolder) Close() error {
	lh.Lock()
	defer lh.Unlock()
	if lh.listener != nil {
		err := lh.listener.Close()
		lh.listener = nil
		return err
	}
	return nil
}

// newFromPortNumber opens a TCP listener based on the port number provided.
func newFromPortNumber(portNumber int) (*ListenerHolder, error) {
	addr := ""
	// port 0 actually means random port which should only be used in tests.
	if portNumber == 0 {
		// Only accept connections from localhost in test mode.
		addr = fmt.Sprintf("localhost:%d", portNumber)
	} else {
		addr = fmt.Sprintf(":%d", portNumber)
	}
	conn, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}

	tcpConn, ok := conn.Addr().(*net.TCPAddr)
	if !ok || tcpConn == nil {
		return nil, fmt.Errorf("net.Listen(\"tcp\", %s) did not return a *net.TCPAddr", addr)
	}

	return &ListenerHolder{
		number:   tcpConn.Port,
		listener: conn,
		addr:     conn.Addr().String(),
	}, nil
}

// MustListen finds the next available port to open for TCP connections, used in tests to make them isolated.
func MustListen() *ListenerHolder {
	// Port 0 in Go is a special port number to randomly choose an available port.
	// Reference, https://golang.org/pkg/net/#ListenTCP.
	lh, err := newFromPortNumber(0)
	if err != nil {
		panic(err)
	}
	return lh
}
@@ -1,102 +0,0 @@ (file deleted)

// Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0; distributed "AS IS", without warranties or conditions of any kind.

package rpc

import (
	"fmt"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
)

const (
	numIterations = 1000
)

// TestAddrString verifies that AddrString() is consistent with the port's Addr().String() value.
func TestAddrString(t *testing.T) {
	lh, err := newFromPortNumber(0)
	if err != nil {
		t.Fatalf("newFromPortNumber(0) had error, %s", err)
	}

	if !strings.HasSuffix(lh.AddrString(), fmt.Sprintf(":%d", lh.Number())) {
		t.Errorf("%s does not have suffix ':%d'", lh.AddrString(), lh.Number())
	}

	port, err := lh.Obtain()
	defer func() {
		err = port.Close()
		if err != nil {
			t.Errorf("error %s while calling port.Close()", err)
		}
	}()
	if err != nil {
		t.Errorf("error %s while calling lh.Obtain", err)
	}
	if port.Addr().String() != lh.AddrString() {
		t.Errorf("port.Addr().String() = %s should match lh.AddrString() = %s", port.Addr().String(), lh.AddrString())
	}
}

// TestObtain verifies that a ListenerHolder only returns Obtain() once.
func TestObtain(t *testing.T) {
	var errCount uint64
	var obtainCount uint64
	lh, err := newFromPortNumber(0)
	if err != nil {
		t.Fatalf("newFromPortNumber(0) had error, %s", err)
	}
	var wg sync.WaitGroup
	for i := 0; i < numIterations; i++ {
		wg.Add(1)
		go func() {
			listener, err := lh.Obtain()
			if err != nil {
				atomic.AddUint64(&errCount, 1)
			} else if listener != nil {
				atomic.AddUint64(&obtainCount, 1)
			} else {
				t.Error("err and listener were both nil.")
			}
			wg.Done()
		}()
	}
	wg.Wait()
	finalErrCount := atomic.LoadUint64(&errCount)
	finalObtainCount := atomic.LoadUint64(&obtainCount)
	if finalErrCount != numIterations-1 {
		t.Errorf("expected %d errors, got %d", numIterations-1, finalErrCount)
	}
	if finalObtainCount != 1 {
		t.Errorf("expected %d obtains, got %d", 1, finalObtainCount)
	}
}

func TestMustListen(t *testing.T) {
	for i := 0; i < numIterations; i++ {
		testName := fmt.Sprintf("[%d] MustListen", i)
		t.Run(testName, func(t *testing.T) {
			t.Parallel()

			lh := MustListen()
			defer lh.Close()
			if lh.Number() <= 0 {
				t.Errorf("Expected %d > 0, port is out of range.", lh.Number())
			}
		})
	}
}
@@ -18,6 +18,7 @@ import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httputil"
	"time"
@@ -37,7 +38,6 @@ import (
	"google.golang.org/grpc/keepalive"
	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/logging"
	"open-match.dev/open-match/internal/signal"
	"open-match.dev/open-match/internal/telemetry"
)

@@ -69,8 +69,8 @@ type ServerParams struct {
	handlersForGrpcProxy   []GrpcProxyHandler
	handlersForHealthCheck []func(context.Context) error

	grpcListener      *ListenerHolder
	grpcProxyListener *ListenerHolder
	grpcListener      net.Listener
	grpcProxyListener net.Listener

	// Root CA public certificate in PEM format.
	rootCaPublicCertificateFileData []byte
@@ -83,28 +83,22 @@ type ServerParams struct {
	enableRPCLogging        bool
	enableRPCPayloadLogging bool
	enableMetrics           bool
	closer                  func()
}

// NewServerParamsFromConfig returns server Params initialized from the configuration file.
func NewServerParamsFromConfig(cfg config.View, prefix string) (*ServerParams, error) {
	grpcLh, err := newFromPortNumber(cfg.GetInt(prefix + ".grpcport"))
func NewServerParamsFromConfig(cfg config.View, prefix string, listen func(network, address string) (net.Listener, error)) (*ServerParams, error) {
	grpcL, err := listen("tcp", fmt.Sprintf(":%d", cfg.GetInt(prefix+".grpcport")))
	if err != nil {
		serverLogger.Fatal(err)
		return nil, err
		return nil, errors.Wrap(err, "can't start listener for grpc")
	}
	httpLh, err := newFromPortNumber(cfg.GetInt(prefix + ".httpport"))
	httpL, err := listen("tcp", fmt.Sprintf(":%d", cfg.GetInt(prefix+".httpport")))
	if err != nil {
		closeErr := grpcLh.Close()
		if closeErr != nil {
			serverLogger.WithFields(logrus.Fields{
				"error": closeErr.Error(),
			}).Info("failed to gRPC close port")
		}
		serverLogger.Fatal(err)
		return nil, err
		surpressedErr := grpcL.Close() // Don't care about additional errors when stopping.
		_ = surpressedErr
		return nil, errors.Wrap(err, "can't start listener for http")
	}
	p := NewServerParamsFromListeners(grpcLh, httpLh)

	p := NewServerParamsFromListeners(grpcL, httpL)

	certFile := cfg.GetString(configNameServerPublicCertificateFile)
	privateKeyFile := cfg.GetString(configNameServerPrivateKeyFile)
@@ -138,20 +132,18 @@ func NewServerParamsFromConfig(cfg config.View, prefix string) (*ServerParams, e
	p.enableMetrics = cfg.GetBool(telemetry.ConfigNameEnableMetrics)
	p.enableRPCLogging = cfg.GetBool(ConfigNameEnableRPCLogging)
	p.enableRPCPayloadLogging = logging.IsDebugEnabled(cfg)
	// TODO: This isn't ideal since telemetry requires config for it to be initialized.
	// This forces us to initialize readiness probes earlier than necessary.
	p.closer = telemetry.Setup(prefix, p.ServeMux, cfg)

	return p, nil
}

// NewServerParamsFromListeners returns server Params initialized with the ListenerHolder variables.
func NewServerParamsFromListeners(grpcLh *ListenerHolder, proxyLh *ListenerHolder) *ServerParams {
func NewServerParamsFromListeners(grpcL net.Listener, proxyL net.Listener) *ServerParams {
	return &ServerParams{
		ServeMux:             http.NewServeMux(),
		handlersForGrpc:      []GrpcHandler{},
		handlersForGrpcProxy: []GrpcProxyHandler{},
		grpcListener:      grpcLh,
		grpcProxyListener: proxyLh,
		grpcListener:      grpcL,
		grpcProxyListener: proxyL,
	}
}

@@ -202,68 +194,27 @@ func (p *ServerParams) invalidate() {
// All HTTP traffic is served from a common http.ServeMux.
type Server struct {
	serverWithProxy grpcServerWithProxy
	closer          func()
}

// grpcServerWithProxy this will go away when insecure.go and tls.go are merged into the same server.
type grpcServerWithProxy interface {
	start(*ServerParams) (func(), error)
	stop()
	start(*ServerParams) error
	stop() error
}

// Start the gRPC+HTTP(s) REST server.
func (s *Server) Start(p *ServerParams) (func(), error) {
func (s *Server) Start(p *ServerParams) error {
	if p.usingTLS() {
		s.serverWithProxy = newTLSServer(p.grpcListener, p.grpcProxyListener)
	} else {
		s.serverWithProxy = newInsecureServer(p.grpcListener, p.grpcProxyListener)
	}
	s.closer = p.closer
	return s.serverWithProxy.start(p)
}

// Stop the gRPC+HTTP(s) REST server.
func (s *Server) Stop() {
	s.serverWithProxy.stop()
	if s.closer != nil {
		s.closer()
	}
}

// startServingIndefinitely creates a server based on the params and begins serving the gRPC and HTTP proxy.
// It returns waitUntilKilled() which will wait indefinitely until crash or Ctrl+C is pressed.
// forceStopServingFunc() is also returned which is used to force kill the server for tests.
func startServingIndefinitely(params *ServerParams) (func(), func(), error) {
	s := &Server{}

	// Start serving traffic.
	waitForStart, err := s.Start(params)
	if err != nil {
		serverLogger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatal("Failed to start gRPC and HTTP servers.")
		return func() {}, func() {}, err
	}
	serverLogger.Info("Server has started.")
	// Exit when we see a signal
	waitUntilKilled, forceStopServingFunc := signal.New()

	waitForStart()
	serveUntilKilledFunc := func() {
		waitUntilKilled()
		s.Stop()
		serverLogger.Info("Shutting down server")
	}
	return serveUntilKilledFunc, forceStopServingFunc, nil
}

// MustServeForever is a convenience method for starting a server and running it indefinitely.
func MustServeForever(params *ServerParams) {
	serveUntilKilledFunc, _, err := startServingIndefinitely(params)
	if err != nil {
		return
	}
	serveUntilKilledFunc()
func (s *Server) Stop() error {
	return s.serverWithProxy.stop()
}

type loggingHTTPHandler struct {
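With MustServeForever and startServingIndefinitely removed, the lifecycle reduces to a plain start-then-stop pair with error returns, as app.go above demonstrates. A minimal sketch, assuming a config.View carrying the usual "api.<service>" port keys; the "api.frontend" prefix is illustrative:

import (
	"log"
	"net"

	"open-match.dev/open-match/internal/config"
	"open-match.dev/open-match/internal/rpc"
)

func serve(cfg config.View) error {
	sp, err := rpc.NewServerParamsFromConfig(cfg, "api.frontend", net.Listen)
	if err != nil {
		return err
	}

	s := &rpc.Server{}
	if err := s.Start(sp); err != nil {
		return err
	}
	// Stop now returns an error instead of blocking on signal handling.
	defer func() {
		if err := s.Stop(); err != nil {
			log.Printf("error stopping server: %v", err)
		}
	}()

	// ... register handlers via appmain and wait for shutdown ...
	return nil
}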
@@ -16,40 +16,40 @@ package rpc

import (
	"fmt"
	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc"
	"io/ioutil"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc"
	"open-match.dev/open-match/internal/telemetry"
	shellTesting "open-match.dev/open-match/internal/testing"
	utilTesting "open-match.dev/open-match/internal/util/testing"
	"open-match.dev/open-match/pkg/pb"
	"strings"
	"testing"
	"time"
)

func TestStartStopServer(t *testing.T) {
	assert := assert.New(t)
	grpcLh := MustListen()
	httpLh := MustListen()
	grpcL := MustListen()
	httpL := MustListen()
	ff := &shellTesting.FakeFrontend{}

	params := NewServerParamsFromListeners(grpcLh, httpLh)
	params := NewServerParamsFromListeners(grpcL, httpL)
	params.AddHandleFunc(func(s *grpc.Server) {
		pb.RegisterFrontendServiceServer(s, ff)
	}, pb.RegisterFrontendServiceHandlerFromEndpoint)
	s := &Server{}
	defer s.Stop()

	waitForStart, err := s.Start(params)
	assert.Nil(err)
	waitForStart()

	conn, err := grpc.Dial(fmt.Sprintf(":%d", grpcLh.Number()), grpc.WithInsecure())
	err := s.Start(params)
	assert.Nil(err)

	endpoint := fmt.Sprintf("http://localhost:%d", httpLh.Number())
	conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
	assert.Nil(err)

	endpoint := fmt.Sprintf("http://localhost:%s", MustGetPortNumber(httpL))
	httpClient := &http.Client{
		Timeout: time.Second,
	}
@@ -57,29 +57,6 @@ func TestStartStopServer(t *testing.T) {
	runGrpcWithProxyTests(t, assert, s.serverWithProxy, conn, httpClient, endpoint)
}

func TestMustServeForever(t *testing.T) {
	assert := assert.New(t)
	grpcLh := MustListen()
	httpLh := MustListen()
	ff := &shellTesting.FakeFrontend{}

	params := NewServerParamsFromListeners(grpcLh, httpLh)
	params.AddHandleFunc(func(s *grpc.Server) {
		pb.RegisterFrontendServiceServer(s, ff)
	}, pb.RegisterFrontendServiceHandlerFromEndpoint)
	serveUntilKilledFunc, stopServingFunc, err := startServingIndefinitely(params)
	assert.Nil(err)
	go func() {
		// Wait for 500ms before killing the server.
		// It really doesn't matter if it actually comes up.
		// We just care that the server can respect an unexpected shutdown quickly after starting.
		time.Sleep(time.Millisecond * 500)
		stopServingFunc()
	}()
	serveUntilKilledFunc()
	// This test will intentionally deadlock if the stop function is not respected.
}

func runGrpcWithProxyTests(t *testing.T, assert *assert.Assertions, s grpcServerWithProxy, conn *grpc.ClientConn, httpClient *http.Client, endpoint string) {
	ctx := utilTesting.NewContext(t)
	feClient := pb.NewFrontendServiceClient(conn)
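The updated test dials using MustGetPortNumber, whose implementation is not part of this excerpt; from its use with %s it appears to return the bound port as a string. A hypothetical sketch of such a helper, named and implemented here only for illustration:

// Hypothetical helper with the shape the test above appears to rely on:
// extract the port of a bound listener as a string, panicking on failure.
package portsketch

import "net"

func mustGetPortNumber(l net.Listener) string {
	_, port, err := net.SplitHostPort(l.Addr().String())
	if err != nil {
		panic(err)
	}
	return port
}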
@@ -1,187 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package testing

import (
	"context"
	"fmt"
	"math/rand"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"testing"
	"time"

	"google.golang.org/grpc"
	"open-match.dev/open-match/internal/rpc"
	"open-match.dev/open-match/internal/util"
	certgenTesting "open-match.dev/open-match/tools/certgen/testing"
)

// MustServe creates a test server and returns TestContext that can be used to create clients.
// This method pseudorandomly selects insecure and TLS mode to ensure both paths work.
func MustServe(t *testing.T, binder func(*rpc.ServerParams)) *TestContext {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	if r.Intn(2) == 0 {
		return MustServeInsecure(t, binder)
	}
	return MustServeTLS(t, binder)
}

// MustServeInsecure creates a test server without transport encryption and returns TestContext that can be used to create clients.
func MustServeInsecure(t *testing.T, binder func(*rpc.ServerParams)) *TestContext {
	grpcLh := rpc.MustListen()
	proxyLh := rpc.MustListen()

	grpcAddress := fmt.Sprintf("localhost:%d", grpcLh.Number())
	proxyAddress := fmt.Sprintf("localhost:%d", proxyLh.Number())

	p := rpc.NewServerParamsFromListeners(grpcLh, proxyLh)
	s := bindAndStart(t, p, binder)
	return &TestContext{
		t:            t,
		s:            s,
		grpcAddress:  grpcAddress,
		proxyAddress: proxyAddress,
		mc:           util.NewMultiClose(),
	}
}

// MustServeTLS creates a test server with TLS and returns TestContext that can be used to create clients.
func MustServeTLS(t *testing.T, binder func(*rpc.ServerParams)) *TestContext {
	grpcLh := rpc.MustListen()
	proxyLh := rpc.MustListen()

	grpcAddress := fmt.Sprintf("localhost:%d", grpcLh.Number())
	proxyAddress := fmt.Sprintf("localhost:%d", proxyLh.Number())
	pub, priv, err := certgenTesting.CreateCertificateAndPrivateKeyForTesting([]string{grpcAddress, proxyAddress})
	if err != nil {
		t.Fatalf("cannot create certificates %v", err)
	}

	p := rpc.NewServerParamsFromListeners(grpcLh, proxyLh)
	p.SetTLSConfiguration(pub, pub, priv)
	s := bindAndStart(t, p, binder)

	return &TestContext{
		t:                  t,
		s:                  s,
		grpcAddress:        grpcAddress,
		proxyAddress:       proxyAddress,
		trustedCertificate: pub,
		mc:                 util.NewMultiClose(),
	}
}

func bindAndStart(t *testing.T, p *rpc.ServerParams, binder func(*rpc.ServerParams)) *rpc.Server {
	binder(p)
	s := &rpc.Server{}
	waitForStart, err := s.Start(p)
	if err != nil {
		t.Fatalf("failed to start server, %v", err)
	}
	waitForStart()
	return s
}

// TestContext provides methods to interact with the Open Match server.
type TestContext struct {
	t                  *testing.T
	s                  *rpc.Server
	grpcAddress        string
	proxyAddress       string
	trustedCertificate []byte
	mc                 *util.MultiClose
}

// AddCloseFunc adds a close function.
func (tc *TestContext) AddCloseFunc(closer func()) {
	tc.mc.AddCloseFunc(closer)
}

// Close shuts down the server and frees the TCP port.
func (tc *TestContext) Close() {
	tc.mc.Close()
	tc.s.Stop()
}

// Context returns a context appropriate for calling an RPC.
func (tc *TestContext) Context() context.Context {
	return context.Background()
}

// MustGRPC returns a grpc client configured to connect to an endpoint.
func (tc *TestContext) MustGRPC() *grpc.ClientConn {
	conn, err := rpc.GRPCClientFromParams(tc.newClientParams(tc.grpcAddress))
	if err != nil {
		tc.t.Fatal(err)
	}
	return conn
}

// MustHTTP returns an HTTP(S) client configured to connect to an endpoint.
func (tc *TestContext) MustHTTP() (*http.Client, string) {
	client, endpoint, err := rpc.HTTPClientFromParams(tc.newClientParams(tc.proxyAddress))
	if err != nil {
		tc.t.Fatal(err)
	}
	return client, endpoint
}

func (tc *TestContext) newClientParams(address string) *rpc.ClientParams {
	return &rpc.ClientParams{
		Address:                 address,
		TrustedCertificate:      tc.trustedCertificate,
		EnableRPCLogging:        true,
		EnableRPCPayloadLogging: true,
		EnableMetrics:           false,
	}
}

// GetHostname returns the hostname of the current test context.
func (tc *TestContext) GetHostname() string {
	return "localhost"
}

// GetHTTPPort returns the http proxy port of the current test context.
func (tc *TestContext) GetHTTPPort() int {
	_, port := hostnameAndPort(tc.t, tc.proxyAddress)
	return port
}

// GetGRPCPort returns the grpc service port of the current test context.
func (tc *TestContext) GetGRPCPort() int {
	_, port := hostnameAndPort(tc.t, tc.grpcAddress)
	return port
}

func hostnameAndPort(t *testing.T, address string) (string, int) {
	// Coerce to a url.
	if !strings.Contains(address, "://") {
		address = "http://" + address
	}
	address = strings.Replace(address, "[::]", "localhost", -1)

	u, err := url.Parse(address)
	if err != nil {
		t.Fatalf("cannot parse address %s, %v", address, err)
	}
	port, err := strconv.Atoi(u.Port())
	if err != nil {
		t.Fatalf("cannot convert port number %s, %v", u.Port(), err)
	}
	return u.Hostname(), port
}
@@ -1,66 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package testing

import (
	"strings"
	"testing"

	"open-match.dev/open-match/internal/rpc"
	"open-match.dev/open-match/pkg/pb"

	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc"
	shellTesting "open-match.dev/open-match/internal/testing"
)

// TestMustServe verifies that a server can stand up in (insecure or TLS) mode.
func TestMustServe(t *testing.T) {
	runMustServeTest(t, MustServe)
}

// TestMustServeInsecure verifies that a server can stand up in insecure mode.
func TestMustServeInsecure(t *testing.T) {
	runMustServeTest(t, MustServeInsecure)
}

// TestMustServeTLS verifies that a server can stand up in TLS mode.
func TestMustServeTLS(t *testing.T) {
	runMustServeTest(t, MustServeTLS)
}

func runMustServeTest(t *testing.T, mustServeFunc func(*testing.T, func(*rpc.ServerParams)) *TestContext) {
	assert := assert.New(t)
	ff := &shellTesting.FakeFrontend{}
	tc := mustServeFunc(t, func(spf *rpc.ServerParams) {
		spf.AddHandleFunc(func(s *grpc.Server) {
			pb.RegisterFrontendServiceServer(s, ff)
		}, pb.RegisterFrontendServiceHandlerFromEndpoint)
	})
	defer tc.Close()

	conn := tc.MustGRPC()
	c := pb.NewFrontendServiceClient(conn)
	resp, err := c.CreateTicket(tc.Context(), &pb.CreateTicketRequest{})
	assert.Nil(err)
	assert.NotNil(resp)

	hc, endpoint := tc.MustHTTP()
	hResp, err := hc.Post(endpoint+"/v1/frontendservice/tickets", "application/json", strings.NewReader("{}"))
	assert.Nil(err)
	if hResp != nil {
		assert.Equal(200, hResp.StatusCode)
	}
}
Some files were not shown because too many files have changed in this diff.