Compare commits

...

26 Commits

Author SHA1 Message Date
5f8febb517 Release 1.0.0-rc.1 (#1218) 2020-05-11 13:43:17 -07:00
93df53201c Only install ci components when running ci (#1213) 2020-05-08 16:06:22 -07:00
eb86841423 Add release all tickets API (#1215) 2020-05-08 15:07:45 -07:00
771f706317 Fix up gRPC service documentation (#1212) 2020-05-08 14:36:41 -07:00
a9f9a2f2e6 Remove alpha software warning (#1214) 2020-05-08 13:43:54 -07:00
068632285e Give assigned tickets a time to live, default 10 minutes (#1211) 2020-05-08 12:24:27 -07:00
113461114e Improve error message for overrunning mmfs (#1207) 2020-05-08 11:50:48 -07:00
0ac7ae13ac Rework config value naming (#1206) 2020-05-08 11:09:03 -07:00
29a2dbcf99 Unified images used in helm chart and release artifacts (#1184) 2020-05-08 10:42:16 -07:00
48d3b5c0ee Added Grafana dashboard of Open Match concepts (#1193)
Dependency on #1192, resolved #1124.

Added a dashboard in Matchmaking concepts, also removed the ticket dashboard.

https://snapshot.raintank.io/dashboard/snapshot/GzXuMdqx554TB6XsNm3al4d6IEyJrEY3
2020-05-08 10:15:34 -07:00
a5fa651106 Add grpc call options to matchfunction query functions (#1205) 2020-05-07 18:24:38 -07:00
cd84d74ff9 Fix race in e2e test (#1209) 2020-05-07 15:15:19 -07:00
8c2aa1ea81 Fix evaluator not running in mmf matchid collision test (#1210) 2020-05-07 14:53:12 -07:00
493ff8e520 Refactor internal telemetry package (#1192)
This commit refactored the internal telemetry package. The pattern used in internal/app/xxx/xxx.go follows the one used in opencensus-go. Besides adding metrics covered in #1124, this commit also introduced changes to make the telemetry settings more efficient and easier to turn on/off.

In this factorization, a metric recorded can be cast into different views through different aggregation methods. Since the metric is the one that consumes most of the resources, this can make the telemetry setups more efficient than before.
Also removed some metrics that were meaningful for debugging in v0.8 but are becoming useless for the current stage.
2020-05-06 18:42:20 -07:00
8363bc5fc9 Refactor e2e testing and improve coverage (#1204) 2020-05-05 20:06:32 -07:00
144f646b7f Test tutorials (#1176) 2020-05-05 12:15:11 -07:00
b518b5cc1b Have the test instance host the mmf and evaluator (#1196) 2020-04-23 15:02:11 -07:00
af0b9fd5f7 Remove errant closing of already closed listeners (#1195) 2020-04-23 10:24:52 -07:00
5f4b522ecd Large refactor of rpc and appmain (#1194) 2020-04-21 14:07:09 -07:00
12625d7f53 Moved customized configmap values to default (#1191) 2020-04-20 15:11:13 -07:00
3248c8c4ad Refactor application binding (#1189) 2020-04-15 11:15:49 -07:00
10c0c59997 Use consistent main code for mmf and evaluator (#1185) 2020-04-09 18:37:32 -07:00
c17e3e62c0 Removed make all commands and pinned dependency versions (#1181)
* Removed make all commands

* oops
2020-04-03 12:01:32 -07:00
8e91be6201 Update development.md doc (#1182) 2020-04-02 15:50:00 -07:00
f6c837d6cd Removed make all commands and pinned dependency versions (#1181)
* Removed make all commands

* oops
2020-04-02 13:22:58 -07:00
3c8908aae0 Fix create-gke-cluster version (#1179) 2020-03-30 21:59:10 -07:00
148 changed files with 6085 additions and 4170 deletions

141
Makefile
View File

@ -52,7 +52,7 @@
# If you want information on how to edit this file checkout,
# http://makefiletutorial.com/
BASE_VERSION = 0.0.0-dev
BASE_VERSION = 1.0.0-rc.1
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
@ -67,6 +67,8 @@ MINIKUBE_VERSION = latest
GOLANGCI_VERSION = 1.18.0
KIND_VERSION = 0.5.1
SWAGGERUI_VERSION = 3.24.2
GOOGLE_APIS_VERSION = aba342359b6743353195ca53f944fe71e6fb6cd4
GRPC_GATEWAY_VERSION = 1.14.3
TERRAFORM_VERSION = 0.12.13
CHART_TESTING_VERSION = 2.4.0
@ -77,7 +79,6 @@ ENABLE_SECURITY_HARDENING = 0
GO = GO111MODULE=on go
# Defines the absolute local directory of the open-match project
REPOSITORY_ROOT := $(patsubst %/,%,$(dir $(abspath $(MAKEFILE_LIST))))
GO_BUILD_COMMAND = CGO_ENABLED=0 $(GO) build -a -installsuffix cgo .
BUILD_DIR = $(REPOSITORY_ROOT)/build
TOOLCHAIN_DIR = $(BUILD_DIR)/toolchain
TOOLCHAIN_BIN = $(TOOLCHAIN_DIR)/bin
@ -196,7 +197,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
CMDS = $(notdir $(wildcard cmd/*))
# Names of the individual images, omitting the openmatch prefix.
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-pool base-build
IMAGES = $(CMDS) mmf-go-soloduel base-build
help:
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
@ -236,9 +237,6 @@ $(foreach CMD,$(CMDS),build-$(CMD)-image): build-%-image: docker build-base-buil
build-mmf-go-soloduel-image: docker build-base-build-image
docker build -f examples/functions/golang/soloduel/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-soloduel:$(ALTERNATE_TAG) .
build-mmf-go-pool-image: docker build-base-build-image
docker build -f test/matchfunction/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-pool:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-pool:$(ALTERNATE_TAG) .
#######################################
## push-images / push-<image name>-image: builds and pushes images to your
## container registry.
@ -361,12 +359,16 @@ install-scale-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EX
# install-ci-chart will install open-match-core with pool based mmf for end-to-end in-cluster test.
install-ci-chart: install-chart-prerequisite build/toolchain/bin/helm$(EXE_EXTENSION) install/helm/open-match/secrets/
$(HELM) upgrade $(OPEN_MATCH_HELM_NAME) $(HELM_UPGRADE_FLAGS) --atomic install/helm/open-match $(HELM_IMAGE_FLAGS) \
--set open-match-core.ignoreListTTL=500ms \
--set open-match-customize.enabled=true \
--set open-match-customize.function.enabled=true \
--set open-match-customize.evaluator.enabled=true \
--set open-match-customize.function.image=openmatch-mmf-go-pool \
--set query.replicas=1,frontend.replicas=1,backend.replicas=1,open-match-customize.evaluator.replicas=1,open-match-customize.function.replicas=1 \
--set query.replicas=1,frontend.replicas=1,backend.replicas=1 \
--set evaluator.hostName=test \
--set evaluator.grpcPort=50509 \
--set evaluator.httpPort=51509 \
--set open-match-core.registrationInterval=200ms \
--set open-match-core.proposalCollectionInterval=200ms \
--set open-match-core.assignedDeleteTimeout=200ms \
--set open-match-core.pendingReleaseTimeout=200ms \
--set open-match-core.queryPageSize=10 \
--set global.gcpProjectId=intentionally-invalid-value \
--set redis.master.resources.requests.cpu=0.6,redis.master.resources.requests.memory=300Mi \
--set ci=true
@ -378,6 +380,10 @@ delete-chart: build/toolchain/bin/helm$(EXE_EXTENSION) build/toolchain/bin/kubec
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)
-$(KUBECTL) delete namespace $(OPEN_MATCH_KUBERNETES_NAMESPACE)-demo
ifneq ($(BASE_VERSION), 0.0.0-dev)
install/yaml/: REGISTRY = gcr.io/$(OPEN_MATCH_PUBLIC_IMAGES_PROJECT_ID)
install/yaml/: TAG = $(BASE_VERSION)
endif
install/yaml/: update-chart-deps install/yaml/install.yaml install/yaml/01-open-match-core.yaml install/yaml/02-open-match-demo.yaml install/yaml/03-prometheus-chart.yaml install/yaml/04-grafana-chart.yaml install/yaml/05-jaeger-chart.yaml install/yaml/06-open-match-override-configmap.yaml install/yaml/07-open-match-default-evaluator.yaml
install/yaml/01-open-match-core.yaml: build/toolchain/bin/helm$(EXE_EXTENSION)
@ -535,13 +541,13 @@ build/toolchain/bin/protoc-gen-swagger$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cd $(TOOLCHAIN_BIN) && $(GO) build -i -pkgdir . github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
build/toolchain/bin/certgen$(EXE_EXTENSION): tools/certgen/certgen$(EXE_EXTENSION)
build/toolchain/bin/certgen$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cp -f $(REPOSITORY_ROOT)/tools/certgen/certgen$(EXE_EXTENSION) $(CERTGEN)
cd $(TOOLCHAIN_BIN) && $(GO) build $(REPOSITORY_ROOT)/tools/certgen/
build/toolchain/bin/reaper$(EXE_EXTENSION): tools/reaper/reaper$(EXE_EXTENSION)
build/toolchain/bin/reaper$(EXE_EXTENSION):
mkdir -p $(TOOLCHAIN_BIN)
cp -f $(REPOSITORY_ROOT)/tools/reaper/reaper$(EXE_EXTENSION) $(TOOLCHAIN_BIN)/reaper$(EXE_EXTENSION)
cd $(TOOLCHAIN_BIN) && $(GO) build $(REPOSITORY_ROOT)/tools/reaper/
# Fake target for docker
docker: no-sudo
@ -591,7 +597,7 @@ get-kind-kubeconfig: build/toolchain/bin/kind$(EXE_EXTENSION)
delete-kind-cluster: build/toolchain/bin/kind$(EXE_EXTENSION) build/toolchain/bin/kubectl$(EXE_EXTENSION)
-$(KIND) delete cluster
create-gke-cluster: GKE_VERSION = 1.14.8-gke.17 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_VERSION = 1.14.10-gke.32 # gcloud beta container get-server-config --zone us-west1-a
create-gke-cluster: GKE_CLUSTER_SHAPE_FLAGS = --machine-type n1-standard-4 --enable-autoscaling --min-nodes 1 --num-nodes 2 --max-nodes 10 --disk-size 50
create-gke-cluster: GKE_FUTURE_COMPAT_FLAGS = --no-enable-basic-auth --no-issue-client-certificate --enable-ip-alias --metadata disable-legacy-endpoints=true --enable-autoupgrade
create-gke-cluster: build/toolchain/bin/kubectl$(EXE_EXTENSION) gcloud
@ -673,9 +679,28 @@ build: assets
$(GO) build ./...
$(GO) build -tags e2ecluster ./...
define test_folder
$(if $(wildcard $(1)/go.mod), \
cd $(1) && \
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -race ./... && \
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -run IgnoreRace$$ ./... \
)
$(foreach dir, $(wildcard $(1)/*/.), $(call test_folder, $(dir)))
endef
define fast_test_folder
$(if $(wildcard $(1)/go.mod), \
cd $(1) && \
$(GO) test ./... \
)
$(foreach dir, $(wildcard $(1)/*/.), $(call fast_test_folder, $(dir)))
endef
test: $(ALL_PROTOS) tls-certs third_party/
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -race ./...
$(GO) test -cover -test.count $(GOLANG_TEST_COUNT) -run IgnoreRace$$ ./...
$(call test_folder,.)
fasttest: $(ALL_PROTOS) tls-certs third_party/
$(call fast_test_folder,.)
test-e2e-cluster: all-protos tls-certs third_party/
$(HELM) test --timeout 7m30s -v 0 --logs -n $(OPEN_MATCH_KUBERNETES_NAMESPACE) $(OPEN_MATCH_HELM_NAME)
@ -720,57 +745,6 @@ build/cmd/demo-%/COPY_PHONY:
mkdir -p $(BUILD_DIR)/cmd/demo-$*/
cp -r examples/demo/static $(BUILD_DIR)/cmd/demo-$*/static
all: service-binaries example-binaries tools-binaries
service-binaries: cmd/minimatch/minimatch$(EXE_EXTENSION) cmd/swaggerui/swaggerui$(EXE_EXTENSION)
service-binaries: cmd/backend/backend$(EXE_EXTENSION) cmd/frontend/frontend$(EXE_EXTENSION)
service-binaries: cmd/query/query$(EXE_EXTENSION) cmd/synchronizer/synchronizer$(EXE_EXTENSION)
example-binaries: example-mmf-binaries
example-mmf-binaries: examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION)
examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cd $(REPOSITORY_ROOT)/examples/functions/golang/soloduel; $(GO_BUILD_COMMAND)
test/matchfunction/matchfunction$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cd $(REPOSITORY_ROOT)/test/matchfunction; $(GO_BUILD_COMMAND)
tools-binaries: tools/certgen/certgen$(EXE_EXTENSION) tools/reaper/reaper$(EXE_EXTENSION)
cmd/backend/backend$(EXE_EXTENSION): pkg/pb/backend.pb.go pkg/pb/backend.pb.gw.go api/backend.swagger.json
cd $(REPOSITORY_ROOT)/cmd/backend; $(GO_BUILD_COMMAND)
cmd/frontend/frontend$(EXE_EXTENSION): pkg/pb/frontend.pb.go pkg/pb/frontend.pb.gw.go api/frontend.swagger.json
cd $(REPOSITORY_ROOT)/cmd/frontend; $(GO_BUILD_COMMAND)
cmd/query/query$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json
cd $(REPOSITORY_ROOT)/cmd/query; $(GO_BUILD_COMMAND)
cmd/default-evaluator/default-evaluator$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
cd $(REPOSITORY_ROOT)/cmd/evaluator; $(GO_BUILD_COMMAND)
cmd/synchronizer/synchronizer$(EXE_EXTENSION): internal/ipb/synchronizer.pb.go
cd $(REPOSITORY_ROOT)/cmd/synchronizer; $(GO_BUILD_COMMAND)
# Note: This list of dependencies is long but only add file references here. If you add a .PHONY dependency make will always rebuild it.
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/backend.pb.go pkg/pb/backend.pb.gw.go api/backend.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/frontend.pb.go pkg/pb/frontend.pb.gw.go api/frontend.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cmd/minimatch/minimatch$(EXE_EXTENSION): pkg/pb/messages.pb.go
cmd/minimatch/minimatch$(EXE_EXTENSION): internal/ipb/synchronizer.pb.go
cd $(REPOSITORY_ROOT)/cmd/minimatch; $(GO_BUILD_COMMAND)
cmd/swaggerui/swaggerui$(EXE_EXTENSION): third_party/swaggerui/
cd $(REPOSITORY_ROOT)/cmd/swaggerui; $(GO_BUILD_COMMAND)
tools/certgen/certgen$(EXE_EXTENSION):
cd $(REPOSITORY_ROOT)/tools/certgen/ && $(GO_BUILD_COMMAND)
tools/reaper/reaper$(EXE_EXTENSION):
cd $(REPOSITORY_ROOT)/tools/reaper/ && $(GO_BUILD_COMMAND)
build/policies/binauthz.yaml: install/policies/binauthz.yaml
mkdir -p $(BUILD_DIR)/policies
cp -f $(REPOSITORY_ROOT)/install/policies/binauthz.yaml $(BUILD_DIR)/policies/binauthz.yaml
@ -827,7 +801,7 @@ ci-reap-namespaces: build/toolchain/bin/reaper$(EXE_EXTENSION)
# For presubmit we want to update the protobuf generated files and verify that tests are good.
presubmit: GOLANG_TEST_COUNT = 5
presubmit: clean third_party/ update-chart-deps assets update-deps lint build install-toolchain test md-test terraform-test
presubmit: clean third_party/ update-chart-deps assets update-deps lint build test md-test terraform-test
build/release/: presubmit clean-install-yaml install/yaml/
mkdir -p $(BUILD_DIR)/release/
@ -861,19 +835,6 @@ clean-protos:
rm -rf $(REPOSITORY_ROOT)/pkg/pb/
rm -rf $(REPOSITORY_ROOT)/internal/ipb/
clean-binaries:
rm -rf $(REPOSITORY_ROOT)/cmd/backend/backend$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/synchronizer/synchronizer$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/frontend/frontend$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/query/query$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/minimatch/minimatch$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/test/matchfunction/matchfunction$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/test/evaluator/evaluator$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/cmd/swaggerui/swaggerui$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/tools/certgen/certgen$(EXE_EXTENSION)
rm -rf $(REPOSITORY_ROOT)/tools/reaper/reaper$(EXE_EXTENSION)
clean-terraform:
rm -rf $(REPOSITORY_ROOT)/install/terraform/.terraform/
@ -898,7 +859,7 @@ clean-swagger-docs:
clean-third-party:
rm -rf $(REPOSITORY_ROOT)/third_party/
clean: clean-images clean-binaries clean-build clean-install-yaml clean-secrets clean-terraform clean-third-party clean-protos clean-swagger-docs
clean: clean-images clean-build clean-install-yaml clean-secrets clean-terraform clean-third-party clean-protos clean-swagger-docs
proxy-frontend: build/toolchain/bin/kubectl$(EXE_EXTENSION)
@echo "Frontend Health: http://localhost:$(FRONTEND_PORT)/healthz"
@ -965,18 +926,18 @@ third_party/google/api:
mkdir -p $(TOOLCHAIN_DIR)/googleapis-temp/
mkdir -p $(REPOSITORY_ROOT)/third_party/google/api
mkdir -p $(REPOSITORY_ROOT)/third_party/google/rpc
curl -o $(TOOLCHAIN_DIR)/googleapis-temp/googleapis.zip -L https://github.com/googleapis/googleapis/archive/master.zip
curl -o $(TOOLCHAIN_DIR)/googleapis-temp/googleapis.zip -L https://github.com/googleapis/googleapis/archive/$(GOOGLE_APIS_VERSION).zip
(cd $(TOOLCHAIN_DIR)/googleapis-temp/; unzip -q -o googleapis.zip)
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-master/google/api/*.proto $(REPOSITORY_ROOT)/third_party/google/api/
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-master/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/api/*.proto $(REPOSITORY_ROOT)/third_party/google/api/
cp -f $(TOOLCHAIN_DIR)/googleapis-temp/googleapis-$(GOOGLE_APIS_VERSION)/google/rpc/*.proto $(REPOSITORY_ROOT)/third_party/google/rpc/
rm -rf $(TOOLCHAIN_DIR)/googleapis-temp
third_party/protoc-gen-swagger/options:
mkdir -p $(TOOLCHAIN_DIR)/grpc-gateway-temp/
mkdir -p $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/master.zip
curl -o $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway.zip -L https://github.com/grpc-ecosystem/grpc-gateway/archive/v$(GRPC_GATEWAY_VERSION).zip
(cd $(TOOLCHAIN_DIR)/grpc-gateway-temp/; unzip -q -o grpc-gateway.zip)
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-master/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
cp -f $(TOOLCHAIN_DIR)/grpc-gateway-temp/grpc-gateway-$(GRPC_GATEWAY_VERSION)/protoc-gen-swagger/options/*.proto $(REPOSITORY_ROOT)/third_party/protoc-gen-swagger/options/
rm -rf $(TOOLCHAIN_DIR)/grpc-gateway-temp
third_party/swaggerui/:

View File

@ -24,10 +24,6 @@ The [Open Match Development guide](docs/development.md) has detailed instruction
on getting the source code, making changes, testing and submitting a pull request
to Open Match.
## Disclaimer
This software is currently alpha, and subject to change.
## Support
* [Slack Channel](https://open-match.slack.com/) ([Signup](https://join.slack.com/t/open-match/shared_invite/enQtNDM1NjcxNTY4MTgzLTM5ZWQxNjc1YWI3MzJmN2RiMWJmYWI0ZjFiNzNkZmNkMWQ3YWU5OGVkNzA5Yzc4OGVkOGU5MTc0OTA5ZTA5NDU))

View File

@ -88,6 +88,10 @@ message ReleaseTicketsRequest{
message ReleaseTicketsResponse {}
message ReleaseAllTicketsRequest{}
message ReleaseAllTicketsResponse {}
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
message AssignmentGroup{
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
@ -120,9 +124,11 @@ message AssignTicketsResponse {
// The BackendService implements APIs to generate matches and handle ticket assignments.
service BackendService {
// FetchMatches triggers a MatchFunction with the specified MatchProfile and returns a set of match proposals that
// match the description of that MatchProfile.
// FetchMatches immediately returns an error if it encounters any execution failures.
// FetchMatches triggers a MatchFunction with the specified MatchProfile and
// returns a set of matches generated by the Match Making Function, and
// accepted by the evaluator.
// Tickets in matches returned by FetchMatches are moved from active to
// pending, and will not be returned by query.
rpc FetchMatches(FetchMatchesRequest) returns (stream FetchMatchesResponse) {
option (google.api.http) = {
post: "/v1/backendservice/matches:fetch"
@ -138,9 +144,8 @@ service BackendService {
};
}
// ReleaseTickets removes the submitted tickets from the list that prevents tickets
// that are awaiting assignment from appearing in MMF queries, effectively putting them back into
// the matchmaking pool
// ReleaseTickets moves tickets from the pending state, to the active state.
// This enables them to be returned by query, and find different matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
@ -150,4 +155,17 @@ service BackendService {
body: "*"
};
}
// ReleaseAllTickets moves all tickets from the pending state, to the active
// state. This enables them to be returned by query, and find different
// matches.
//
// BETA FEATURE WARNING: This call and the associated Request and Response
// messages are not finalized and still subject to possible change or removal.
rpc ReleaseAllTickets(ReleaseAllTicketsRequest) returns (ReleaseAllTicketsResponse) {
option (google.api.http) = {
post: "/v1/backendservice/tickets:releaseall"
body: "*"
};
}
}

View File

@ -26,7 +26,7 @@
"paths": {
"/v1/backendservice/matches:fetch": {
"post": {
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and returns a set of match proposals that \nmatch the description of that MatchProfile.\nFetchMatches immediately returns an error if it encounters any execution failures.",
"summary": "FetchMatches triggers a MatchFunction with the specified MatchProfile and\nreturns a set of matches generated by the Match Making Function, and\naccepted by the evaluator.\nTickets in matches returned by FetchMatches are moved from active to\npending, and will not be returned by query.",
"operationId": "FetchMatches",
"responses": {
"200": {
@ -94,7 +94,7 @@
},
"/v1/backendservice/tickets:release": {
"post": {
"summary": "ReleaseTickets removes the submitted tickets from the list that prevents tickets \nthat are awaiting assignment from appearing in MMF queries, effectively putting them back into\nthe matchmaking pool",
"summary": "ReleaseTickets moves tickets from the pending state, to the active state.\nThis enables them to be returned by query, and find different matches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseTickets",
"responses": {
@ -126,6 +126,41 @@
"BackendService"
]
}
},
"/v1/backendservice/tickets:releaseall": {
"post": {
"summary": "ReleaseAllTickets moves all tickets from the pending state, to the active\nstate. This enables them to be returned by query, and find different\nmatches.",
"description": "BETA FEATURE WARNING: This call and the associated Request and Response\nmessages are not finalized and still subject to possible change or removal.",
"operationId": "ReleaseAllTickets",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchReleaseAllTicketsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchReleaseAllTicketsRequest"
}
}
],
"tags": [
"BackendService"
]
}
}
},
"definitions": {
@ -176,7 +211,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchAssignmentFailure": {
"type": "object",
@ -368,6 +403,12 @@
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchReleaseAllTicketsRequest": {
"type": "object"
},
"openmatchReleaseAllTicketsResponse": {
"type": "object"
},
"openmatchReleaseTicketsRequest": {
"type": "object",
"properties": {
@ -442,7 +483,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -458,10 +499,10 @@
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"protobufAny": {
"type": "object",

View File

@ -76,7 +76,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchEvaluateRequest": {
"type": "object",
@ -165,7 +165,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -181,10 +181,10 @@
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"protobufAny": {
"type": "object",

View File

@ -95,9 +95,7 @@ service FrontendService {
}
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
// The client should delete the Ticket when finished matchmaking with it.
rpc DeleteTicket(DeleteTicketRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1/frontendservice/tickets/{ticket_id}"

View File

@ -91,7 +91,7 @@
]
},
"delete": {
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client must delete the Ticket when finished matchmaking with it. \n - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.\nUsers may still be able to assign/get a ticket after calling DeleteTicket on it.",
"summary": "DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.\nThe client should delete the Ticket when finished matchmaking with it.",
"operationId": "DeleteTicket",
"responses": {
"200": {
@ -172,7 +172,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchCreateTicketRequest": {
"type": "object",
@ -220,7 +220,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -236,10 +236,10 @@
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"openmatchWatchAssignmentsResponse": {
"type": "object",

View File

@ -75,7 +75,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchDoubleRangeFilter": {
"type": "object",
@ -269,7 +269,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -285,10 +285,10 @@
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"protobufAny": {
"type": "object",

View File

@ -21,16 +21,18 @@ import "google/rpc/status.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
// A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an
// individual 'Player' or a 'Group' of players. Open Match will not interpret
// what the Ticket represents but just treat it as a matchmaking unit with a set
// of SearchFields. Open Match stores the Ticket in state storage and enables an
// Assignment to be associated with this Ticket.
// A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent
// an individual 'Player', a 'Group' of players, or any other concepts unique to
// your use case. Open Match will not interpret what the Ticket represents but
// just treat it as a matchmaking unit with a set of SearchFields. Open Match
// stores the Ticket in state storage and enables an Assignment to be set on the
// Ticket.
message Ticket {
// Id represents an auto-generated Id issued by Open Match.
string id = 1;
// An Assignment represents a game server assignment associated with a Ticket.
// An Assignment represents a game server assignment associated with a Ticket,
// or whatever finalized matched state means for your use case.
// Open Match does not require or inspect any fields on Assignment.
Assignment assignment = 3;
@ -43,8 +45,8 @@ message Ticket {
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 5;
// Create time represents the time at which this Ticket was created. It is
// populated by Open Match at the time of Ticket creation.
// Create time is the time the Ticket was created. It is populated by Open
// Match at the time of Ticket creation.
google.protobuf.Timestamp create_time = 6;
// Deprecated fields.
@ -64,8 +66,8 @@ message SearchFields {
repeated string tags = 3;
}
// An Assignment represents a game server assignment associated with a Ticket. Open
// match does not require or inspect any fields on assignment.
// An Assignment represents a game server assignment associated with a Ticket.
// Open Match does not require or inspect any fields on assignment.
message Assignment {
// Connection information for this Assignment.
string connection = 1;

View File

@ -79,8 +79,8 @@ message QueryTicketIdsResponse {
service QueryService {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `storage.pool.size` and stream back responses.
// - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
// QueryTickets pages the Tickets by `queryPageSize` and streams back responses.
// - queryPageSize defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.
rpc QueryTickets(QueryTicketsRequest) returns (stream QueryTicketsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/tickets:query"
@ -90,8 +90,8 @@ service QueryService {
// QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
// - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
// QueryTicketIds pages the TicketIDs by `storage.pool.size` and stream back responses.
// - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
// QueryTicketIds pages the TicketIDs by `queryPageSize` and streams back responses.
// - queryPageSize defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.
rpc QueryTicketIds(QueryTicketIdsRequest) returns (stream QueryTicketIdsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/ticketids:query"

View File

@ -26,7 +26,7 @@
"paths": {
"/v1/queryservice/ticketids:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `storage.pool.size` and stream back responses.\n - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `queryPageSize` and streams back responses.\n - queryPageSize defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryTicketIds",
"responses": {
"200": {
@ -60,7 +60,7 @@
},
"/v1/queryservice/tickets:query": {
"post": {
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `storage.pool.size` and stream back responses.\n - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `queryPageSize` and streams back responses.\n - queryPageSize defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.",
"operationId": "QueryTickets",
"responses": {
"200": {
@ -109,7 +109,7 @@
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
}
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on assignment."
},
"openmatchDoubleRangeFilter": {
"type": "object",
@ -271,7 +271,7 @@
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment represents a game server assignment associated with a Ticket.\nOpen Match does not require or inspect any fields on Assignment."
"description": "An Assignment represents a game server assignment associated with a Ticket,\nor whatever finalized matched state means for your use case.\nOpen Match does not require or inspect any fields on Assignment."
},
"search_fields": {
"$ref": "#/definitions/openmatchSearchFields",
@ -287,10 +287,10 @@
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
"description": "Create time is the time the Ticket was created. It is populated by Open\nMatch at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket may represent\nan individual 'Player', a 'Group' of players, or any other concepts unique to\nyour use case. Open Match will not interpret what the Ticket represents but\njust treat it as a matchmaking unit with a set of SearchFields. Open Match\nstores the Ticket in state storage and enables an Assignment to be set on the\nTicket."
},
"protobufAny": {
"type": "object",

View File

@ -164,7 +164,7 @@ artifacts:
- install/yaml/06-open-match-override-configmap.yaml
substitutions:
_OM_VERSION: "0.0.0-dev"
_OM_VERSION: "1.0.0-rc.1"
_GCB_POST_SUBMIT: "0"
_GCB_LATEST_VERSION: "undefined"
logsBucket: 'gs://open-match-build-logs/'

View File

@ -16,11 +16,10 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/backend"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
)
func main() {
app.RunApplication("backend", config.Read, backend.BindService)
appmain.RunApplication("backend", backend.BindService)
}

View File

@ -11,14 +11,14 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/app/evaluator/defaulteval"
"open-match.dev/open-match/internal/appmain"
)
func main() {
// Invoke the harness to setup a GRPC service that handles requests to run the evaluator.
evaluator.RunEvaluator(defaulteval.Evaluate)
appmain.RunApplication("evaluator", defaulteval.BindService)
}

View File

@ -16,11 +16,10 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/frontend"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
)
func main() {
app.RunApplication("frontend", config.Read, frontend.BindService)
appmain.RunApplication("frontend", frontend.BindService)
}

View File

@ -16,11 +16,10 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/minimatch"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
)
func main() {
app.RunApplication("minimatch", config.Read, minimatch.BindService)
appmain.RunApplication("minimatch", minimatch.BindService)
}

View File

@ -16,11 +16,10 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/query"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
)
func main() {
app.RunApplication("query", config.Read, query.BindService)
appmain.RunApplication("query", query.BindService)
}

View File

@ -16,10 +16,9 @@ package main
import (
"open-match.dev/open-match/examples/scale/backend"
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
)
func main() {
app.RunApplication("scale", config.Read, backend.BindService)
appmain.RunApplication("scale", backend.BindService)
}

View File

@ -16,10 +16,9 @@ package main
import (
"open-match.dev/open-match/examples/scale/frontend"
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
)
func main() {
app.RunApplication("scale", config.Read, frontend.BindService)
appmain.RunApplication("scale", frontend.BindService)
}

View File

@ -1,10 +1,10 @@
{
"urls": [
{"name": "Frontend", "url": "https://open-match.dev/api/v0.0.0-dev/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v0.0.0-dev/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v0.0.0-dev/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v0.0.0-dev/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v0.0.0-dev/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v0.0.0-dev/evaluator.swagger.json"}
{"name": "Frontend", "url": "https://open-match.dev/api/v1.0.0-rc.1/frontend.swagger.json"},
{"name": "Backend", "url": "https://open-match.dev/api/v1.0.0-rc.1/backend.swagger.json"},
{"name": "Query", "url": "https://open-match.dev/api/v1.0.0-rc.1/query.swagger.json"},
{"name": "MatchFunction", "url": "https://open-match.dev/api/v1.0.0-rc.1/matchfunction.swagger.json"},
{"name": "Synchronizer", "url": "https://open-match.dev/api/v1.0.0-rc.1/synchronizer.swagger.json"},
{"name": "Evaluator", "url": "https://open-match.dev/api/v1.0.0-rc.1/evaluator.swagger.json"}
]
}

View File

@ -16,11 +16,10 @@
package main
import (
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/app/synchronizer"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
)
func main() {
app.RunApplication("synchronizer", config.Read, synchronizer.BindService)
appmain.RunApplication("synchronizer", synchronizer.BindService)
}

View File

@ -9,14 +9,12 @@ To build Open Match you'll need the following applications installed.
* [Git](https://git-scm.com/downloads)
* [Go](https://golang.org/doc/install)
* [Python3 with virtualenv](https://wiki.python.org/moin/BeginnersGuide/Download)
* Make (Mac: install [XCode](https://itunes.apple.com/us/app/xcode/id497799835))
* [Docker](https://docs.docker.com/install/) including the
[post-install steps](https://docs.docker.com/install/linux/linux-postinstall/).
Optional Software
* [Google Cloud Platform](gcloud.md)
* [Visual Studio Code](https://code.visualstudio.com/Download) for IDE.
Vim and Emacs work too.
* [VirtualBox](https://www.virtualbox.org/wiki/Downloads) recommended for
@ -27,8 +25,7 @@ running:
```bash
sudo apt-get update
sudo apt-get install -y -q python3 python3-virtualenv virtualenv make \
google-cloud-sdk git unzip tar
sudo apt-get install -y -q make google-cloud-sdk git unzip tar
```
*It's recommended that you install Go using their instructions because package
@ -51,13 +48,11 @@ make
[create a fork](https://help.github.com/en/articles/fork-a-repo) and use that
but for purpose of this guide we'll be using the upstream/master.*
## Building
## Building code and images
```bash
# Reset workspace
make clean
# Compile all the binaries
make all -j$(nproc)
# Run tests
make test
# Build all the images.
@ -87,11 +82,9 @@ default context the Makefile will honor that._
# GKE cluster: make create-gke-cluster/delete-gke-cluster
# or create a local Minikube cluster
make create-gke-cluster
# Step 2: Download helm and install Tiller in the cluster
make push-helm
# Step 3: Build and Push Open Match Images to gcr.io
# Step 2: Build and Push Open Match Images to gcr.io
make push-images -j$(nproc)
# Install Open Match in the cluster.
# Step 3: Install Open Match in the cluster.
make install-chart
# Create a proxy to Open Match pods so that you can access them locally.
@ -105,12 +98,29 @@ make proxy
make delete-chart
```
## Interaction
## Iterating
While iterating on the project, you may need to:
1. Install/Run everything
2. Make some code changes
3. Make sure the changes compile by running `make test`
4. Build and push Docker images to your personal registry by running `make push-images -j$(nproc)`
5. Deploy the code change by running `make install-chart`
6. Verify it's working by [looking at the logs](#accessing-logs) or looking at the monitoring dashboard by running `make proxy-grafana`
7. Tear down Open Match by running `make delete-chart`
Before integrating with Open Match you can manually interact with it to get a feel for how it works.
## Accessing logs
To look at Open Match core services' logs, run:
```bash
# Replace om-frontend with the service name that you would like to access
kubectl logs -n open-match svc/om-frontend
```
`make proxy-ui` exposes the Swagger UI for Open Match locally on your computer.
You can then go to http://localhost:51500 and view the API as well as interactively call Open Match.
## API References
While integrating with Open Match you may want to understand its API surface concepts or interact with it and get a feel for how it works.
The APIs are defined in `proto` format under the `api/` folder, with references available at [open-match.dev](https://open-match.dev/site/docs/reference/api/).
You can also run `make proxy-ui` to expose the Swagger UI for Open Match locally on your computer after [deploying it to Kubernetes](#deploying-to-kubernetes), then go to http://localhost:51500 and view the REST APIs as well as interactively call Open Match.
By default you will be talking to the frontend server but you can change the target API url to any of the following:
@ -144,55 +154,9 @@ export GOPATH=$HOME/workspace/
## Pull Requests
If you want to submit a Pull Request there's some tools to help prepare your
change.
```bash
# Runs code generators, tests, and linters.
make presubmit
```
`make presubmit` catches most of the issues your change can run into. If the
submit checks fail you can run it locally via,
```bash
make local-cloud-build
```
If you want to submit a Pull Request, `make presubmit` can catch most of the issues your change can run into.
Our [continuous integration](https://console.cloud.google.com/cloud-build/builds?project=open-match-build)
runs against all PRs. In order to see your build results you'll need to
become a member of
[open-match-discuss@googlegroups.com](https://groups.google.com/forum/#!forum/open-match-discuss).
## Makefile
The Makefile is the core of Open Match's build process. There's a lot of
commands but here's a list of the important ones and patterns to remember them.
```bash
# Help
make
# Reset workspace (delete all build artifacts)
make clean
# Delete auto-generated protobuf code and swagger API docs.
make clean-protos clean-swagger-docs
# make clean-* deletes some part of the build outputs.
# Build all Docker images
make build-images
# Build frontend docker image.
make build-frontend-image
# Formats, Vets, and tests the codebase.
make fmt vet test
# Same as above also regenerates autogen files.
make presubmit
# Run website on http://localhost:8080
make run-site
# Proxy all Open Match processes to view them.
make proxy
```

View File

@ -25,6 +25,7 @@ import (
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"open-match.dev/open-match/examples/scale/scenarios"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/telemetry"
@ -53,8 +54,8 @@ var (
// Run triggers execution of functions that continuously fetch, assign and
// delete matches.
func BindService(p *rpc.ServerParams, cfg config.View) error {
go run(cfg)
func BindService(p *appmain.Params, b *appmain.Bindings) error {
go run(p.Config())
return nil
}

View File

@ -24,6 +24,7 @@ import (
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"open-match.dev/open-match/examples/scale/scenarios"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/telemetry"
@ -45,8 +46,8 @@ var (
// Run triggers execution of the scale frontend component that creates
// tickets at scale in Open Match.
func BindService(p *rpc.ServerParams, cfg config.View) error {
go run(cfg)
func BindService(p *appmain.Params, b *appmain.Bindings) error {
go run(p.Config())
return nil
}

1
go.sum
View File

@ -79,6 +79,7 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=

View File

@ -13,8 +13,8 @@
# limitations under the License.
apiVersion: v2
appVersion: "0.0.0-dev"
version: 0.0.0-dev
appVersion: "1.0.0-rc.1"
version: 1.0.0-rc.1
name: open-match
dependencies:
- name: redis

View File

@ -1,41 +0,0 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: customize-configmap
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
component: config
release: {{ .Release.Name }}
data:
matchmaker_config_default.yaml: |-
api:
functions:
hostname: "{{ .Values.function.hostName }}"
grpcport: "{{ .Values.function.grpcPort }}"
httpport: "{{ .Values.function.httpPort }}"
evaluator:
hostname: "{{ .Values.evaluator.hostName }}"
grpcport: "{{ .Values.evaluator.grpcPort }}"
httpport: "{{ .Values.evaluator.httpPort }}"
matchmaker_config_override.yaml: |-
api:
query:
hostname: "{{ .Values.query.hostName }}.{{ .Release.Namespace }}.svc.cluster.local"
grpcport: "{{ .Values.query.grpcPort }}"

View File

@ -35,11 +35,11 @@ evaluatorConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: customize-configmap
configName: om-configmap-default
customize:
volumeName: customize-config-volume
volumeName: om-config-volume-override
mountPath: /app/config/override
configName: customize-configmap
configName: om-configmap-override
mmfConfigs:
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,
@ -48,8 +48,8 @@ mmfConfigs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: customize-configmap
configName: om-configmap-default
customize:
volumeName: customize-config-volume
volumeName: om-config-volume-override
mountPath: /app/config/override
configName: customize-configmap
configName: om-configmap-override

View File

@ -1,290 +0,0 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 3,
"iteration": 1562886170229,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(frontend_tickets_created[$timewindow]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Created",
"refId": "A"
},
{
"expr": "sum(rate(frontend_tickets_deleted[$timewindow]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Deleted",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Ticket Flow",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"description": "",
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 4,
"interval": "",
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(frontend_tickets_assignments_retrieved[$timewindow]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Assignments Retrieved",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Assignments",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": null,
"format": "reqps",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"schemaVersion": 18,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": null,
"current": {
"text": "5m",
"value": "5m"
},
"hide": 0,
"includeAll": false,
"label": "Time Window",
"multi": false,
"name": "timewindow",
"options": [
{
"selected": true,
"text": "5m",
"value": "5m"
},
{
"selected": false,
"text": "10m",
"value": "10m"
},
{
"selected": false,
"text": "15m",
"value": "15m"
},
{
"selected": false,
"text": "30m",
"value": "30m"
},
{
"selected": false,
"text": "1h",
"value": "1h"
},
{
"selected": false,
"text": "4h",
"value": "4h"
}
],
"query": "5m,10m,15m,30m,1h,4h",
"skipUrlSync": false,
"type": "custom"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Tickets",
"uid": "TlgyFfIWz",
"version": 6
}

View File

@ -68,8 +68,15 @@ data:
swaggerui:
hostname: "{{ .Values.swaggerui.hostName }}"
httpport: "{{ .Values.swaggerui.httpPort }}"
# Configurations for api.test and api.scale are used for testing.
test:
hostname: "test"
grpcport: "50509"
httpport: "51509"
scale:
httpport: "51509"
{{- if .Values.global.tls.enabled }}
tls:
trustedCertificatePath: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
@ -78,17 +85,12 @@ data:
rootcertificatefile: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
{{- end }}
storage:
ignoreListTTL: {{ index .Values "open-match-core" "ignoreListTTL" }}
page:
size: 10000
redis:
{{- if index .Values "open-match-core" "redis" "enabled" }}
{{- if index .Values "redis" "sentinel" "enabled"}}
sentinelPort: {{ .Values.redis.sentinel.port }}
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
sentinelHostname: {{ .Values.redis.fullnameOverride }}.{{ .Release.Namespace }}.svc.cluster.local
sentinelHostname: {{ .Values.redis.fullnameOverride }}
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
{{- else}}
# Open Match's default Redis setups
@ -111,11 +113,12 @@ data:
healthCheckTimeout: {{ index .Values "open-match-core" "redis" "pool" "healthCheckTimeout" }}
telemetry:
reportingPeriod: "{{ .Values.global.telemetry.reportingPeriod }}"
traceSamplingFraction: "{{ .Values.global.telemetry.traceSamplingFraction }}"
zpages:
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
jaeger:
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
samplerFraction: {{ .Values.global.telemetry.jaeger.samplerFraction }}
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
prometheus:

View File

@ -25,12 +25,24 @@ metadata:
release: {{ .Release.Name }}
data:
matchmaker_config_override.yaml: |-
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: {{ index .Values "open-match-core" "registrationInterval" }}
# Length of time after the match function has started before it will be canceled,
# and evaluator call input is EOF.
proposalCollectionInterval: {{ index .Values "open-match-core" "proposalCollectionInterval" }}
# Time after a ticket has been returned from fetch matches (marked as pending)
# before it automatically becomes active again and will be returned by query
# calls.
pendingReleaseTimeout: {{ index .Values "open-match-core" "pendingReleaseTimeout" }}
# Time after a ticket has been assigned before it is automatically deleted.
assignedDeleteTimeout: {{ index .Values "open-match-core" "assignedDeleteTimeout" }}
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: {{ index .Values "open-match-core" "queryPageSize" }}
api:
evaluator:
hostname: "{{ .Values.evaluator.hostName }}"
grpcport: "{{ .Values.evaluator.grpcPort }}"
httpport: "{{ .Values.evaluator.httpPort }}"
synchronizer:
registrationIntervalMs: 250ms
proposalCollectionIntervalMs: 20000ms
{{- end }}

View File

@ -1,3 +1,19 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
# This applies om-test-role to the open-match-test-service account under the release namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@ -16,3 +32,5 @@ roleRef:
kind: Role
name: om-test-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -1,3 +1,19 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -27,3 +43,5 @@ rules:
verbs:
- get
- list
{{- end }}

View File

@ -1,3 +1,19 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
# Create a service account for open-match-test services.
apiVersion: v1
kind: ServiceAccount
@ -9,3 +25,5 @@ metadata:
app: {{ template "openmatch.name" . }}
release: {{ .Release.Name }}
automountServiceAccountToken: true
{{- end }}

View File

@ -1,24 +1,83 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if .Values.ci }}
kind: Service
apiVersion: v1
metadata:
name: test
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
component: test
release: {{ .Release.Name }}
spec:
selector:
app: {{ template "openmatch.name" . }}
component: test
release: {{ .Release.Name }}
ports:
- name: grpc
protocol: TCP
port: 50509
- name: http
protocol: TCP
port: 51509
---
apiVersion: v1
kind: Pod
metadata:
name: om-test
name: test
namespace: {{ .Release.Namespace }}
annotations:
{{- include "openmatch.chartmeta" . | nindent 4 }}
"helm.sh/hook": test-success
labels:
app: {{ template "openmatch.name" . }}
component: om-test
component: test
release: {{ .Release.Name }}
spec:
# Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.
activeDeadlineSeconds: 900
serviceAccountName: open-match-test-service
automountServiceAccountToken: true
volumes:
- configMap:
defaultMode: 420
name: om-configmap-default
name: om-config-volume-default
- configMap:
defaultMode: 420
name: om-configmap-override
name: om-config-volume-override
containers:
- image: "{{ .Values.global.image.registry }}/openmatch-base-build:{{ .Values.global.image.tag }}"
- name: "test"
volumeMounts:
- mountPath: /app/config/default
name: om-config-volume-default
- mountPath: /app/config/override
name: om-config-volume-override
image: "{{ .Values.global.image.registry }}/openmatch-base-build:{{ .Values.global.image.tag }}"
ports:
- name: grpc
containerPort: 50509
- name: http
containerPort: 51509
imagePullPolicy: Always
name: om-test
name: test
resources:
limits:
memory: 800Mi
@ -32,7 +91,7 @@ spec:
command: ["go"]
args:
- "test"
- "./test/e2e"
- "./internal/testing/e2e"
- "-v"
- "-timeout"
- "150s"
@ -40,3 +99,5 @@ spec:
- "-tags"
- "e2ecluster"
restartPolicy: Never
{{- end }}

View File

@ -172,7 +172,23 @@ redis:
# Controls if users need to install backend, frontend, query, om-configmap, and swaggerui.
open-match-core:
enabled: true
ignoreListTTL: 60000ms
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after match function has started before it will be canceled,
# and evaluator call input is EOF.
proposalCollectionInterval: 20s
# Time after a ticket has been returned from fetch matches (marked as pending)
# before it automatically becomes active again and will be returned by query
# calls.
pendingReleaseTimeout: 1m
# Time after a ticket has been assigned before it is automatically deleted.
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
redis:
enabled: true
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
@ -256,7 +272,7 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 0.0.0-dev
tag: 1.0.0-rc.1
pullPolicy: Always
@ -264,11 +280,12 @@ global:
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
telemetry:
reportingPeriod: "1m"
traceSamplingFraction: 0.005 # What fraction of traces to sample.
zpages:
enabled: true
jaeger:
enabled: false
samplerFraction: 0.005 # Configures a sampler that samples a given fraction of traces.
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
prometheus:
@ -280,4 +297,3 @@ global:
prefix: "open_match"
grafana:
enabled: false
reportingPeriod: "1m"

View File

@ -157,7 +157,23 @@ redis:
# Controls if users need to install backend, frontend, query, om-configmap, and swaggerui.
open-match-core:
enabled: true
ignoreListTTL: 60000ms
# Length of time between first fetch matches call, and when no further fetch
# matches calls will join the current evaluation/synchronization cycle,
# instead waiting for the next cycle.
registrationInterval: 250ms
# Length of time after match function has started before it will be canceled,
# and evaluator call input is EOF.
proposalCollectionInterval: 20s
# Time after a ticket has been returned from fetch matches (marked as pending)
# before it automatically becomes active again and will be returned by query
# calls.
pendingReleaseTimeout: 1m
# Time after a ticket has been assigned before it is automatically deleted.
assignedDeleteTimeout: 10m
# Maximum number of tickets to return on a single QueryTicketsResponse.
queryPageSize: 10000
redis:
enabled: true
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
@ -241,7 +257,7 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 0.0.0-dev
tag: 1.0.0-rc.1
pullPolicy: Always
@ -249,11 +265,12 @@ global:
# requires pod-level annotation to customize its scrape path.
# See definitions in templates/_helpers.tpl - "prometheus.annotations" section for details
telemetry:
reportingPeriod: "1m"
traceSamplingFraction: 0.01 # What fraction of traces to sample.
zpages:
enabled: true
jaeger:
enabled: false
samplerFraction: 0.01 # Configures a sampler that samples a given fraction of traces.
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
prometheus:
@ -265,4 +282,3 @@ global:
prefix: "open_match"
grafana:
enabled: false
reportingPeriod: "1m"

View File

@ -1,55 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package app contains the common application initialization code for Open Match servers.
package app
import (
"github.com/sirupsen/logrus"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/logging"
"open-match.dev/open-match/internal/rpc"
)
var (
logger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "app.main",
})
)
// RunApplication creates a server.
func RunApplication(serverName string, getCfg func() (config.View, error), bindService func(*rpc.ServerParams, config.View) error) {
cfg, err := getCfg()
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
}).Fatalf("cannot read configuration.")
}
logging.ConfigureLogging(cfg)
p, err := rpc.NewServerParamsFromConfig(cfg, "api."+serverName)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
}).Fatalf("cannot construct server.")
}
if err := bindService(p, cfg); err != nil {
logger.WithFields(logrus.Fields{
"error": err.Error(),
}).Fatalf("failed to bind %s service.", serverName)
}
rpc.MustServeForever(p)
}

View File

@ -15,25 +15,72 @@
package backend
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
totalBytesPerMatch = stats.Int64("open-match.dev/backend/total_bytes_per_match", "Total bytes per match", stats.UnitBytes)
ticketsPerMatch = stats.Int64("open-match.dev/backend/tickets_per_match", "Number of tickets per match", stats.UnitDimensionless)
ticketsReleased = stats.Int64("open-match.dev/backend/tickets_released", "Number of tickets released per request", stats.UnitDimensionless)
ticketsAssigned = stats.Int64("open-match.dev/backend/tickets_assigned", "Number of tickets assigned per request", stats.UnitDimensionless)
totalMatchesView = &view.View{
Measure: totalBytesPerMatch,
Name: "open-match.dev/backend/total_matches",
Description: "Total number of matches",
Aggregation: view.Count(),
}
totalBytesPerMatchView = &view.View{
Measure: totalBytesPerMatch,
Name: "open-match.dev/backend/total_bytes_per_match",
Description: "Total bytes per match",
Aggregation: telemetry.DefaultBytesDistribution,
}
ticketsPerMatchView = &view.View{
Measure: ticketsPerMatch,
Name: "open-match.dev/backend/tickets_per_match",
Description: "Tickets per ticket",
Aggregation: telemetry.DefaultCountDistribution,
}
ticketsAssignedView = &view.View{
Measure: ticketsAssigned,
Name: "open-match.dev/backend/tickets_assigned",
Description: "Number of tickets assigned per request",
Aggregation: view.Sum(),
}
ticketsReleasedView = &view.View{
Measure: ticketsReleased,
Name: "open-match.dev/backend/tickets_released",
Description: "Number of tickets released per request",
Aggregation: view.Sum(),
}
)
// BindService creates the backend service and binds it to the serving harness.
func BindService(p *rpc.ServerParams, cfg config.View) error {
func BindService(p *appmain.Params, b *appmain.Bindings) error {
service := &backendService{
synchronizer: newSynchronizerClient(cfg),
store: statestore.New(cfg),
cc: rpc.NewClientCache(cfg),
synchronizer: newSynchronizerClient(p.Config()),
store: statestore.New(p.Config()),
cc: rpc.NewClientCache(p.Config()),
}
p.AddHealthCheckFunc(service.store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
b.AddHealthCheckFunc(service.store.HealthCheck)
b.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterBackendServiceServer(s, service)
}, pb.RegisterBackendServiceHandlerFromEndpoint)
b.RegisterViews(
totalMatchesView,
totalBytesPerMatchView,
ticketsPerMatchView,
ticketsAssignedView,
ticketsReleasedView,
)
return nil
}

View File

@ -23,16 +23,20 @@ import (
"strings"
"sync"
"go.opencensus.io/stats"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain/contextcause"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
@ -49,10 +53,6 @@ var (
"app": "openmatch",
"component": "app.backend",
})
mMatchesFetched = telemetry.Counter("backend/matches_fetched", "matches fetched")
mMatchesSentToEvaluation = telemetry.Counter("backend/matches_sent_to_evaluation", "matches sent to evaluation")
mTicketsAssigned = telemetry.Counter("backend/tickets_assigned", "tickets assigned")
mTicketsReleased = telemetry.Counter("backend/tickets_released", "tickets released")
)
// FetchMatches triggers a MatchFunction with the specified MatchProfiles, while each MatchProfile
@ -60,10 +60,10 @@ var (
// FetchMatches immediately returns an error if it encounters any execution failures.
// - If the synchronizer is enabled, FetchMatch will then call the synchronizer to deduplicate proposals with overlapped tickets.
func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.BackendService_FetchMatchesServer) error {
if req.GetConfig() == nil {
if req.Config == nil {
return status.Error(codes.InvalidArgument, ".config is required")
}
if req.GetProfile() == nil {
if req.Profile == nil {
return status.Error(codes.InvalidArgument, ".profile is required")
}
@ -77,7 +77,7 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
// The mmf must be canceled if the synchronizer call fails (which will
// cancel the context from the error group). However the synchronizer call
// is NOT dependant on the mmf call.
mmfCtx, cancelMmfs := context.WithCancel(ctx)
mmfCtx, cancelMmfs := contextcause.WithCancelCause(ctx)
// Closed when mmfs should start.
startMmfs := make(chan struct{})
proposals := make(chan *pb.Match)
@ -127,11 +127,10 @@ sendProposals:
if !ok {
break sendProposals
}
id, loaded := m.LoadOrStore(p.GetMatchId(), p)
_, loaded := m.LoadOrStore(p.GetMatchId(), p)
if loaded {
return fmt.Errorf("found duplicate matchID %s returned from MMF", id)
return fmt.Errorf("MatchMakingFunction returned same match_id twice: \"%s\"", p.GetMatchId())
}
telemetry.RecordUnitMeasurement(ctx, mMatchesSentToEvaluation)
err := syncStream.Send(&ipb.SynchronizeRequest{Proposal: p})
if err != nil {
return fmt.Errorf("error sending proposal to synchronizer: %w", err)
@ -146,7 +145,7 @@ sendProposals:
return nil
}
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs context.CancelFunc) error {
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs contextcause.CancelErrFunc) error {
var startMmfsOnce sync.Once
for {
@ -165,12 +164,17 @@ func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync
}
if resp.CancelMmfs {
cancelMmfs()
cancelMmfs(errors.New("match function ran longer than proposal window, canceling"))
}
if match, ok := m.Load(resp.GetMatchId()); ok {
telemetry.RecordUnitMeasurement(ctx, mMatchesFetched)
err = stream.Send(&pb.FetchMatchesResponse{Match: match.(*pb.Match)})
if v, ok := m.Load(resp.GetMatchId()); ok {
match, ok := v.(*pb.Match)
if !ok {
return fmt.Errorf("error casting sync map value into *pb.Match: %w", err)
}
stats.Record(ctx, totalBytesPerMatch.M(int64(proto.Size(match))))
stats.Record(ctx, ticketsPerMatch.M(int64(len(match.GetTickets()))))
err = stream.Send(&pb.FetchMatchesResponse{Match: match})
if err != nil {
return fmt.Errorf("error sending match to caller of backend: %w", err)
}
@ -208,6 +212,10 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
stream, err := client.Run(ctx, &pb.RunRequest{Profile: profile})
if err != nil {
logger.WithError(err).Error("failed to run match function for profile")
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
}
return err
}
@ -218,6 +226,10 @@ func callGrpcMmf(ctx context.Context, cc *rpc.ClientCache, profile *pb.MatchProf
}
if err != nil {
logger.Errorf("%v.Run() error, %v\n", client, err)
if ctx.Err() != nil {
// gRPC likes to suppress the context's error, so stop that.
return ctx.Err()
}
return err
}
select {
@ -300,10 +312,18 @@ func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTick
return nil, err
}
telemetry.RecordNUnitMeasurement(ctx, mTicketsReleased, int64(len(req.TicketIds)))
stats.Record(ctx, ticketsReleased.M(int64(len(req.TicketIds))))
return &pb.ReleaseTicketsResponse{}, nil
}
func (s *backendService) ReleaseAllTickets(ctx context.Context, req *pb.ReleaseAllTicketsRequest) (*pb.ReleaseAllTicketsResponse, error) {
err := s.store.ReleaseAllTickets(ctx)
if err != nil {
return nil, err
}
return &pb.ReleaseAllTicketsResponse{}, nil
}
// AssignTickets overwrites the Assignment field of the input TicketIds.
func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
resp, err := doAssignTickets(ctx, req, s.store)
@ -317,7 +337,7 @@ func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicket
numIds += len(ag.TicketIds)
}
telemetry.RecordNUnitMeasurement(ctx, mTicketsAssigned, int64(numIds))
stats.Record(ctx, ticketsAssigned.M(int64(numIds)))
return resp, nil
}

View File

@ -16,12 +16,17 @@
package defaulteval
import (
"context"
"math"
"sort"
"go.opencensus.io/stats"
"github.com/golang/protobuf/ptypes"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats/view"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/pkg/pb"
)
@ -30,6 +35,14 @@ var (
"app": "evaluator",
"component": "evaluator.default",
})
collidedMatchesPerEvaluate = stats.Int64("open-match.dev/defaulteval/collided_matches_per_call", "Number of collided matches per default evaluator call", stats.UnitDimensionless)
collidedMatchesPerEvaluateView = &view.View{
Measure: collidedMatchesPerEvaluate,
Name: "open-match.dev/defaulteval/collided_matches_per_call",
Description: "Number of collided matches per default evaluator call",
Aggregation: view.Sum(),
}
)
type matchInp struct {
@ -37,13 +50,22 @@ type matchInp struct {
inp *pb.DefaultEvaluationCriteria
}
// Evaluate sorts the matches by DefaultEvaluationCriteria.Score (optional),
// BindService define the initialization steps for this evaluator
func BindService(p *appmain.Params, b *appmain.Bindings) error {
if err := evaluator.BindServiceFor(evaluate)(p, b); err != nil {
return err
}
b.RegisterViews(collidedMatchesPerEvaluateView)
return nil
}
// evaluate sorts the matches by DefaultEvaluationCriteria.Score (optional),
// then returns matches which don't collide with previously returned matches.
func Evaluate(p *evaluator.Params) ([]string, error) {
matches := make([]*matchInp, 0, len(p.Matches))
func evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
matches := make([]*matchInp, 0)
nilEvlautionInputs := 0
for _, m := range p.Matches {
for m := range in {
// Evaluation criteria is optional, but sort it lower than any matches which
// provided criteria.
inp := &pb.DefaultEvaluationCriteria{
@ -84,7 +106,13 @@ func Evaluate(p *evaluator.Params) ([]string, error) {
d.maybeAdd(m)
}
return d.resultIDs, nil
stats.Record(context.Background(), collidedMatchesPerEvaluate.M(int64(len(matches)-len(d.resultIDs))))
for _, id := range d.resultIDs {
out <- id
}
return nil
}
type collidingMatch struct {

View File

@ -15,13 +15,13 @@
package defaulteval
import (
"context"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/stretchr/testify/assert"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/pkg/pb"
)
@ -114,9 +114,21 @@ func TestEvaluate(t *testing.T) {
test := test
t.Run(test.description, func(t *testing.T) {
t.Parallel()
gotMatchIDs, err := Evaluate(&evaluator.Params{Matches: test.testMatches})
in := make(chan *pb.Match, 10)
out := make(chan string, 10)
for _, m := range test.testMatches {
in <- m
}
close(in)
err := evaluate(context.Background(), in, out)
assert.Nil(t, err)
gotMatchIDs := []string{}
close(out)
for id := range out {
gotMatchIDs = append(gotMatchIDs, id)
}
assert.Equal(t, len(test.wantMatchIDs), len(gotMatchIDs))
for _, mID := range gotMatchIDs {

View File

@ -16,35 +16,42 @@
package evaluator
import (
"github.com/spf13/viper"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
// RunEvaluator is a hook for the main() method in the main executable.
func RunEvaluator(eval Evaluator) {
app.RunApplication("evaluator", getCfg, func(p *rpc.ServerParams, cfg config.View) error {
return BindService(p, cfg, eval)
})
}
// BindService creates the evaluator service to the server Params.
func BindService(p *rpc.ServerParams, cfg config.View, eval Evaluator) error {
p.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterEvaluatorServer(s, &evaluatorService{evaluate: eval})
}, pb.RegisterEvaluatorHandlerFromEndpoint)
return nil
}
func getCfg() (config.View, error) {
cfg := viper.New()
cfg.Set("api.evaluator.hostname", "om-evaluator")
cfg.Set("api.evaluator.grpcport", 50508)
cfg.Set("api.evaluator.httpport", 51508)
return cfg, nil
var (
matchesPerEvaluateRequest = stats.Int64("open-match.dev/evaluator/matches_per_request", "Number of matches sent to the evaluator per request", stats.UnitDimensionless)
matchesPerEvaluateResponse = stats.Int64("open-match.dev/evaluator/matches_per_response", "Number of matches returned by the evaluator per response", stats.UnitDimensionless)
matchesPerEvaluateRequestView = &view.View{
Measure: matchesPerEvaluateRequest,
Name: "open-match.dev/evaluator/matches_per_request",
Description: "Number of matches sent to the evaluator per request",
Aggregation: telemetry.DefaultCountDistribution,
}
matchesPerEvaluateResponseView = &view.View{
Measure: matchesPerEvaluateResponse,
Name: "open-match.dev/evaluator/matches_per_response",
Description: "Number of matches sent to the evaluator per response",
Aggregation: telemetry.DefaultCountDistribution,
}
)
// BindServiceFor creates the evaluator service and binds it to the serving harness.
func BindServiceFor(eval Evaluator) appmain.Bind {
return func(p *appmain.Params, b *appmain.Bindings) error {
b.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterEvaluatorServer(s, &evaluatorService{eval})
}, pb.RegisterEvaluatorHandlerFromEndpoint)
b.RegisterViews(
matchesPerEvaluateRequestView,
matchesPerEvaluateResponseView,
)
return nil
}
}

View File

@ -16,13 +16,13 @@
package evaluator
import (
"context"
"io"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/pkg/pb"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"golang.org/x/sync/errgroup"
"open-match.dev/open-match/pkg/pb"
)
var (
@ -35,7 +35,7 @@ var (
// Evaluator is the function signature for the Evaluator to be implemented by
// the user. The harness will pass the Matches to evaluate to the Evaluator
// and the Evaluator will return an accepted list of Matches.
type Evaluator func(*Params) ([]string, error)
type Evaluator func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error
// evaluatorService implements pb.EvaluatorServer, the server generated by
// compiling the protobuf, by fulfilling the pb.EvaluatorServer interface.
@ -43,51 +43,60 @@ type evaluatorService struct {
evaluate Evaluator
}
// Params is the parameters to be passed by the harness to the evaluator.
// - logger:
// A logger used to generate error/debug logs
// - Matches
// Matches to be evaluated
type Params struct {
Logger *logrus.Entry
Matches []*pb.Match
}
// Evaluate is this harness's implementation of the gRPC call defined in
// api/evaluator.proto.
func (s *evaluatorService) Evaluate(stream pb.Evaluator_EvaluateServer) error {
var matches = []*pb.Match{}
for {
req, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
matches = append(matches, req.GetMatch())
}
g, ctx := errgroup.WithContext(stream.Context())
// Run the customized evaluator!
results, err := s.evaluate(&Params{
Logger: logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "evaluator.implementation",
}),
Matches: matches,
in := make(chan *pb.Match)
out := make(chan string)
g.Go(func() error {
defer close(in)
count := 0
for {
req, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
select {
case in <- req.Match:
count++
case <-ctx.Done():
return ctx.Err()
}
}
stats.Record(ctx, matchesPerEvaluateRequest.M(int64(count)))
return nil
})
if err != nil {
return status.Error(codes.Aborted, err.Error())
}
g.Go(func() error {
defer close(out)
return s.evaluate(ctx, in, out)
})
g.Go(func() error {
defer func() {
for range out {
}
}()
for _, result := range results {
if err := stream.Send(&pb.EvaluateResponse{MatchId: result}); err != nil {
return err
count := 0
for id := range out {
err := stream.Send(&pb.EvaluateResponse{MatchId: id})
if err != nil {
return err
}
count++
}
}
stats.Record(ctx, matchesPerEvaluateResponse.M(int64(count)))
return nil
})
logger.WithFields(logrus.Fields{
"results": results,
}).Debug("matches accepted by the evaluator")
return nil
err := g.Wait()
if err != nil {
logger.WithError(err).Error("Error in evaluator.Evaluate")
}
return err
}

View File

@ -15,24 +15,47 @@
package frontend
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
totalBytesPerTicket = stats.Int64("open-match.dev/frontend/total_bytes_per_ticket", "Total bytes per ticket", stats.UnitBytes)
searchFieldsPerTicket = stats.Int64("open-match.dev/frontend/searchfields_per_ticket", "Searchfields per ticket", stats.UnitDimensionless)
totalBytesPerTicketView = &view.View{
Measure: totalBytesPerTicket,
Name: "open-match.dev/frontend/total_bytes_per_ticket",
Description: "Total bytes per ticket",
Aggregation: telemetry.DefaultBytesDistribution,
}
searchFieldsPerTicketView = &view.View{
Measure: searchFieldsPerTicket,
Name: "open-match.dev/frontend/searchfields_per_ticket",
Description: "SearchFields per ticket",
Aggregation: telemetry.DefaultCountDistribution,
}
)
// BindService creates the frontend service and binds it to the serving harness.
func BindService(p *rpc.ServerParams, cfg config.View) error {
func BindService(p *appmain.Params, b *appmain.Bindings) error {
service := &frontendService{
cfg: cfg,
store: statestore.New(cfg),
cfg: p.Config(),
store: statestore.New(p.Config()),
}
p.AddHealthCheckFunc(service.store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
b.AddHealthCheckFunc(service.store.HealthCheck)
b.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterFrontendServiceServer(s, service)
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
b.RegisterViews(
totalBytesPerTicketView,
searchFieldsPerTicketView,
)
return nil
}

View File

@ -22,12 +22,12 @@ import (
"github.com/golang/protobuf/ptypes/empty"
"github.com/rs/xid"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
@ -43,10 +43,6 @@ var (
"app": "openmatch",
"component": "app.frontend",
})
mTicketsCreated = telemetry.Counter("frontend/tickets_created", "tickets created")
mTicketsDeleted = telemetry.Counter("frontend/tickets_deleted", "tickets deleted")
mTicketsRetrieved = telemetry.Counter("frontend/tickets_retrieved", "tickets retrieved")
mTicketAssignmentsRetrieved = telemetry.Counter("frontend/tickets_assignments_retrieved", "ticket assignments retrieved")
)
// CreateTicket assigns an unique TicketId to the input Ticket and record it in state storage.
@ -77,6 +73,14 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
ticket.Id = xid.New().String()
ticket.CreateTime = ptypes.TimestampNow()
sfCount := 0
sfCount += len(ticket.GetSearchFields().GetDoubleArgs())
sfCount += len(ticket.GetSearchFields().GetStringArgs())
sfCount += len(ticket.GetSearchFields().GetTags())
stats.Record(ctx, searchFieldsPerTicket.M(int64(sfCount)))
stats.Record(ctx, totalBytesPerTicket.M(int64(proto.Size(ticket))))
err := store.CreateTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
@ -95,7 +99,6 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
return nil, err
}
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
return ticket, nil
}
@ -108,7 +111,6 @@ func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicket
if err != nil {
return nil, err
}
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
return &empty.Empty{}, nil
}
@ -150,7 +152,6 @@ func doDeleteTicket(ctx context.Context, id string, store statestore.Service) er
// GetTicket get the Ticket associated with the specified TicketId.
func (s *frontendService) GetTicket(ctx context.Context, req *pb.GetTicketRequest) (*pb.Ticket, error) {
telemetry.RecordUnitMeasurement(ctx, mTicketsRetrieved)
return doGetTickets(ctx, req.GetTicketId(), s.store)
}
@ -177,7 +178,6 @@ func (s *frontendService) WatchAssignments(req *pb.WatchAssignmentsRequest, stre
return ctx.Err()
default:
sender := func(assignment *pb.Assignment) error {
telemetry.RecordUnitMeasurement(ctx, mTicketAssignmentsRetrieved)
return stream.Send(&pb.WatchAssignmentsResponse{Assignment: assignment})
}
return doWatchAssignments(ctx, req.GetTicketId(), sender, s.store)

View File

@ -19,25 +19,24 @@ import (
"open-match.dev/open-match/internal/app/frontend"
"open-match.dev/open-match/internal/app/query"
"open-match.dev/open-match/internal/app/synchronizer"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/appmain"
)
// BindService creates the minimatch service to the server Params.
func BindService(p *rpc.ServerParams, cfg config.View) error {
if err := backend.BindService(p, cfg); err != nil {
func BindService(p *appmain.Params, b *appmain.Bindings) error {
if err := backend.BindService(p, b); err != nil {
return err
}
if err := frontend.BindService(p, cfg); err != nil {
if err := frontend.BindService(p, b); err != nil {
return err
}
if err := query.BindService(p, cfg); err != nil {
if err := query.BindService(p, b); err != nil {
return err
}
if err := synchronizer.BindService(p, cfg); err != nil {
if err := synchronizer.BindService(p, b); err != nil {
return err
}

View File

@ -15,22 +15,76 @@
package query
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
ticketsPerQuery = stats.Int64("open-match.dev/query/tickets_per_query", "Number of tickets per query", stats.UnitDimensionless)
cacheTotalItems = stats.Int64("open-match.dev/query/total_cache_items", "Total number of tickets query service cached", stats.UnitDimensionless)
cacheFetchedItems = stats.Int64("open-match.dev/query/fetched_items", "Number of fetched items in total", stats.UnitDimensionless)
cacheWaitingQueries = stats.Int64("open-match.dev/query/waiting_queries", "Number of waiting queries in the last update", stats.UnitDimensionless)
cacheUpdateLatency = stats.Float64("open-match.dev/query/update_latency", "Time elapsed of each query cache update", stats.UnitMilliseconds)
ticketsPerQueryView = &view.View{
Measure: ticketsPerQuery,
Name: "open-match.dev/query/tickets_per_query",
Description: "Tickets per query",
Aggregation: telemetry.DefaultCountDistribution,
}
cacheTotalItemsView = &view.View{
Measure: cacheTotalItems,
Name: "open-match.dev/query/total_cached_items",
Description: "Total number of cached tickets",
Aggregation: view.LastValue(),
}
cacheFetchedItemsView = &view.View{
Measure: cacheFetchedItems,
Name: "open-match.dev/query/total_fetched_items",
Description: "Total number of fetched tickets",
Aggregation: view.Sum(),
}
cacheUpdateView = &view.View{
Measure: cacheWaitingQueries,
Name: "open-match.dev/query/cache_updates",
Description: "Number of query cache updates in total",
Aggregation: view.Count(),
}
cacheWaitingQueriesView = &view.View{
Measure: cacheWaitingQueries,
Name: "open-match.dev/query/waiting_requests",
Description: "Number of waiting requests in total",
Aggregation: telemetry.DefaultCountDistribution,
}
cacheUpdateLatencyView = &view.View{
Measure: cacheUpdateLatency,
Name: "open-match.dev/query/update_latency",
Description: "Time elapsed of each query cache update",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
)
// BindService creates the query service and binds it to the serving harness.
func BindService(p *rpc.ServerParams, cfg config.View) error {
func BindService(p *appmain.Params, b *appmain.Bindings) error {
service := &queryService{
cfg: cfg,
tc: newTicketCache(p, cfg),
cfg: p.Config(),
tc: newTicketCache(b, p.Config()),
}
p.AddHandleFunc(func(s *grpc.Server) {
b.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterQueryServiceServer(s, service)
}, pb.RegisterQueryServiceHandlerFromEndpoint)
b.RegisterViews(
ticketsPerQueryView,
cacheTotalItemsView,
cacheUpdateView,
cacheFetchedItemsView,
cacheWaitingQueriesView,
cacheUpdateLatencyView,
)
return nil
}

View File

@ -17,14 +17,17 @@ package query
import (
"context"
"sync"
"time"
"go.opencensus.io/stats"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/filter"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
@ -44,6 +47,7 @@ type queryService struct {
}
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
ctx := responseServer.Context()
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
@ -55,7 +59,7 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
}
var results []*pb.Ticket
err = s.tc.request(responseServer.Context(), func(tickets map[string]*pb.Ticket) {
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
for _, ticket := range tickets {
if pf.In(ticket) {
results = append(results, ticket)
@ -66,6 +70,7 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
logger.WithError(err).Error("Failed to run request.")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
@ -86,6 +91,7 @@ func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer
}
func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseServer pb.QueryService_QueryTicketIdsServer) error {
ctx := responseServer.Context()
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
@ -97,7 +103,7 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
}
var results []string
err = s.tc.request(responseServer.Context(), func(tickets map[string]*pb.Ticket) {
err = s.tc.request(ctx, func(tickets map[string]*pb.Ticket) {
for id, ticket := range tickets {
if pf.In(ticket) {
results = append(results, id)
@ -108,6 +114,7 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
logger.WithError(err).Error("Failed to run request.")
return err
}
stats.Record(ctx, ticketsPerQuery.M(int64(len(results))))
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
@ -129,7 +136,7 @@ func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseSer
func getPageSize(cfg config.View) int {
const (
name = "storage.page.size"
name = "queryPageSize"
// Minimum number of tickets to be returned in a streamed response for QueryTickets. This value
// will be used if page size is configured lower than the minimum value.
minPageSize int = 10
@ -145,7 +152,7 @@ func getPageSize(cfg config.View) int {
return defaultPageSize
}
pSize := cfg.GetInt("storage.page.size")
pSize := cfg.GetInt(name)
if pSize < minPageSize {
logger.Infof("page size %v is lower than the minimum limit of %v", pSize, maxPageSize)
pSize = minPageSize
@ -182,7 +189,7 @@ type ticketCache struct {
err error
}
func newTicketCache(p *rpc.ServerParams, cfg config.View) *ticketCache {
func newTicketCache(b *appmain.Bindings, cfg config.View) *ticketCache {
tc := &ticketCache{
store: statestore.New(cfg),
requests: make(chan *cacheRequest),
@ -191,7 +198,7 @@ func newTicketCache(p *rpc.ServerParams, cfg config.View) *ticketCache {
}
tc.startRunRequest <- struct{}{}
p.AddHealthCheckFunc(tc.store.HealthCheck)
b.AddHealthCheckFunc(tc.store.HealthCheck)
return tc
}
@ -254,6 +261,7 @@ collectAllWaiting:
}
tc.update()
stats.Record(context.Background(), cacheWaitingQueries.M(int64(len(reqs))))
// Send WaitGroup to query calls, letting them run their query on the ticket
// cache.
@ -271,6 +279,7 @@ collectAllWaiting:
}
func (tc *ticketCache) update() {
st := time.Now()
previousCount := len(tc.tickets)
currentAll, err := tc.store.GetIndexedIDSet(context.Background())
@ -305,6 +314,10 @@ func (tc *ticketCache) update() {
tc.tickets[t.Id] = t
}
stats.Record(context.Background(), cacheTotalItems.M(int64(previousCount)))
stats.Record(context.Background(), cacheFetchedItems.M(int64(len(toFetch))))
stats.Record(context.Background(), cacheUpdateLatency.M(float64(time.Since(st))/float64(time.Millisecond)))
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
tc.err = nil
}

View File

@ -35,21 +35,21 @@ func TestGetPageSize(t *testing.T) {
{
"set",
func(cfg config.Mutable) {
cfg.Set("storage.page.size", "2156")
cfg.Set("queryPageSize", "2156")
},
2156,
},
{
"low",
func(cfg config.Mutable) {
cfg.Set("storage.page.size", "9")
cfg.Set("queryPageSize", "9")
},
10,
},
{
"high",
func(cfg config.Mutable) {
cfg.Set("storage.page.size", "10001")
cfg.Set("queryPageSize", "10001")
},
10000,
},

View File

@ -53,8 +53,6 @@ func RunApplication() {
func serve(cfg config.View) {
mux := &http.ServeMux{}
closer := telemetry.Setup("swaggerui", mux, cfg)
defer closer()
port := cfg.GetInt("api.swaggerui.httpport")
baseDir, err := os.Getwd()
if err != nil {

View File

@ -40,7 +40,7 @@ var (
)
type evaluator interface {
evaluate(context.Context, <-chan []*pb.Match) ([]string, error)
evaluate(context.Context, <-chan []*pb.Match, chan<- string) error
}
var errNoEvaluatorType = status.Errorf(codes.FailedPrecondition, "unable to determine evaluator type, either api.evaluator.grpcport or api.evaluator.httpport must be specified in the config")
@ -66,17 +66,17 @@ type deferredEvaluator struct {
cacher *config.Cacher
}
func (de *deferredEvaluator) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
func (de *deferredEvaluator) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
e, err := de.cacher.Get()
if err != nil {
return nil, err
return err
}
matches, err := e.(evaluator).evaluate(ctx, pc)
err = e.(evaluator).evaluate(ctx, pc, acceptedIds)
if err != nil {
de.cacher.ForceReset()
}
return matches, err
return err
}
type grcpEvaluatorClient struct {
@ -106,7 +106,7 @@ func newGrpcEvaluator(cfg config.View) (evaluator, func(), error) {
}, close, nil
}
func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
eg, ctx := errgroup.WithContext(ctx)
var stream pb.Evaluator_EvaluateClient
@ -114,18 +114,17 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
var err error
stream, err = ec.evaluator.Evaluate(ctx)
if err != nil {
return nil, fmt.Errorf("error starting evaluator call: %w", err)
return fmt.Errorf("error starting evaluator call: %w", err)
}
}
results := []string{}
matchIDs := &sync.Map{}
eg.Go(func() error {
for proposals := range pc {
for _, proposal := range proposals {
if _, ok := matchIDs.LoadOrStore(proposal.GetMatchId(), true); ok {
return fmt.Errorf("found duplicate matchID %s", proposal.GetMatchId())
return fmt.Errorf("multiple match functions used same match_id: \"%s\"", proposal.GetMatchId())
}
if err := stream.Send(&pb.EvaluateRequest{Match: proposal}); err != nil {
return fmt.Errorf("failed to send request to evaluator, desc: %w", err)
@ -150,22 +149,23 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
return fmt.Errorf("failed to get response from evaluator client, desc: %w", err)
}
v, ok := matchIDs.LoadOrStore(resp.GetMatchId(), false)
v, ok := matchIDs.Load(resp.GetMatchId())
if !ok {
return fmt.Errorf("evaluator returned unmatched matchID %s which does not correspond to its input", resp.GetMatchId())
return fmt.Errorf("evaluator returned match_id \"%s\" which does not correspond to its any match in its input", resp.GetMatchId())
}
if !v.(bool) {
return fmt.Errorf("evaluator returned duplicated matchID %s", resp.GetMatchId())
return fmt.Errorf("evaluator returned same match_id twice: \"%s\"", resp.GetMatchId())
}
results = append(results, resp.GetMatchId())
matchIDs.Store(resp.GetMatchId(), false)
acceptedIds <- resp.GetMatchId()
}
})
err := eg.Wait()
if err != nil {
return nil, err
return err
}
return results, nil
return nil
}
type httpEvaluatorClient struct {
@ -194,7 +194,7 @@ func newHTTPEvaluator(cfg config.View) (evaluator, func(), error) {
}, close, nil
}
func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match, acceptedIds chan<- string) error {
reqr, reqw := io.Pipe()
var wg sync.WaitGroup
wg.Add(1)
@ -227,14 +227,14 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
req, err := http.NewRequest("POST", ec.baseURL+"/v1/evaluator/matches:evaluate", reqr)
if err != nil {
return nil, status.Errorf(codes.Aborted, "failed to create evaluator http request, desc: %s", err.Error())
return status.Errorf(codes.Aborted, "failed to create evaluator http request, desc: %s", err.Error())
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Transfer-Encoding", "chunked")
resp, err := ec.httpClient.Do(req.WithContext(ctx))
if err != nil {
return nil, status.Errorf(codes.Aborted, "failed to get response from evaluator, desc: %s", err.Error())
return status.Errorf(codes.Aborted, "failed to get response from evaluator, desc: %s", err.Error())
}
defer func() {
if resp.Body.Close() != nil {
@ -243,7 +243,6 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
}()
wg.Add(1)
var results = []string{}
rc := make(chan error, 1)
defer close(rc)
go func() {
@ -272,16 +271,16 @@ func (ec *httpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
rc <- status.Errorf(codes.Unavailable, "failed to execute jsonpb.UnmarshalString(%s, &proposal): %v.", item.Result, err)
return
}
results = append(results, resp.GetMatchId())
acceptedIds <- resp.GetMatchId()
}
}()
wg.Wait()
if len(sc) != 0 {
return nil, <-sc
return <-sc
}
if len(rc) != 0 {
return nil, <-rc
return <-rc
}
return results, nil
return nil
}

View File

@ -15,21 +15,52 @@
package synchronizer
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
)
var (
iterationLatency = stats.Float64("open-match.dev/synchronizer/iteration_latency", "Time elapsed of each synchronizer iteration", stats.UnitMilliseconds)
registrationWaitTime = stats.Float64("open-match.dev/synchronizer/registration_wait_time", "Time elapsed of registration wait time", stats.UnitMilliseconds)
registrationMMFDoneTime = stats.Float64("open-match.dev/synchronizer/registration_mmf_done_time", "Time elapsed wasted in registration window with done MMFs", stats.UnitMilliseconds)
iterationLatencyView = &view.View{
Measure: iterationLatency,
Name: "open-match.dev/synchronizer/iteration_latency",
Description: "Time elapsed of each synchronizer iteration",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
registrationWaitTimeView = &view.View{
Measure: registrationWaitTime,
Name: "open-match.dev/synchronizer/registration_wait_time",
Description: "Time elapsed of registration wait time",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
registrationMMFDoneTimeView = &view.View{
Measure: registrationMMFDoneTime,
Name: "open-match.dev/synchronizer/registration_mmf_done_time",
Description: "Time elapsed wasted in registration window with done MMFs",
Aggregation: telemetry.DefaultMillisecondsDistribution,
}
)
// BindService creates the synchronizer service and binds it to the serving harness.
func BindService(p *rpc.ServerParams, cfg config.View) error {
store := statestore.New(cfg)
service := newSynchronizerService(cfg, newEvaluator(cfg), store)
p.AddHealthCheckFunc(store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
func BindService(p *appmain.Params, b *appmain.Bindings) error {
store := statestore.New(p.Config())
service := newSynchronizerService(p.Config(), newEvaluator(p.Config()), store)
b.AddHealthCheckFunc(store.HealthCheck)
b.AddHandleFunc(func(s *grpc.Server) {
ipb.RegisterSynchronizerServer(s, service)
}, nil)
b.RegisterViews(
iterationLatencyView,
registrationWaitTimeView,
registrationMMFDoneTimeView,
)
return nil
}

View File

@ -21,7 +21,10 @@ import (
"sync"
"time"
"go.opencensus.io/stats"
"github.com/sirupsen/logrus"
"open-match.dev/open-match/internal/appmain/contextcause"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/statestore"
@ -123,7 +126,11 @@ func (s *synchronizerService) Synchronize(stream ipb.Synchronizer_SynchronizeSer
select {
case mIDs, ok := <-m6cBuffer:
if !ok {
return nil
// Prevent race: An error will result in this channel being
// closed as part of cleanup. If it's especially fast, it may
// beat the context done case, so be sure to return any
// potential error.
return registration.cycleCtx.Err()
}
for _, mID := range mIDs {
err = stream.Send(&ipb.SynchronizeResponse{MatchId: mID})
@ -181,6 +188,9 @@ func (s synchronizerService) register(ctx context.Context) *registration {
resp: make(chan *registration),
ctx: ctx,
}
st := time.Now()
defer stats.Record(ctx, registrationWaitTime.M(float64(time.Since(st))/float64(time.Millisecond)))
for {
select {
case s.synchronizeRegistration <- req:
@ -198,8 +208,9 @@ func (s synchronizerService) register(ctx context.Context) *registration {
///////////////////////////////////////
func (s *synchronizerService) runCycle() {
cst := time.Now()
/////////////////////////////////////// Initialize cycle
ctx, cancel := withCancelCause(context.Background())
ctx, cancel := contextcause.WithCancelCause(context.Background())
m2c := make(chan mAndM6c)
m3c := make(chan *pb.Match)
@ -236,6 +247,7 @@ func (s *synchronizerService) runCycle() {
}()
/////////////////////////////////////// Run Registration Period
rst := time.Now()
closeRegistration := time.After(s.registrationInterval())
Registration:
for {
@ -268,6 +280,7 @@ Registration:
go func() {
allM1cSent.Wait()
m1c.cutoff()
stats.Record(ctx, registrationMMFDoneTime.M(float64((s.registrationInterval()-time.Since(rst))/time.Millisecond)))
}()
cancelProposalCollection := time.AfterFunc(s.proposalCollectionInterval(), func() {
@ -277,6 +290,7 @@ Registration:
}
})
<-closedOnCycleEnd
stats.Record(ctx, iterationLatency.M(float64(time.Since(cst)/time.Millisecond)))
// Clean up in case it was never needed.
cancelProposalCollection.Stop()
@ -387,13 +401,9 @@ func (c *cutoffSender) cutoff() {
///////////////////////////////////////
// Calls the evaluator with the matches.
func (s *synchronizerService) wrapEvaluator(ctx context.Context, cancel cancelErrFunc, m3c <-chan []*pb.Match, m5c chan<- string) {
matchIDs, err := s.eval.evaluate(ctx, m3c)
if err == nil {
for _, mID := range matchIDs {
m5c <- mID
}
} else {
func (s *synchronizerService) wrapEvaluator(ctx context.Context, cancel contextcause.CancelErrFunc, m4c <-chan []*pb.Match, m5c chan<- string) {
err := s.eval.evaluate(ctx, m4c, m5c)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err,
}).Error("error calling evaluator, canceling cycle")
@ -428,7 +438,7 @@ func getTicketIds(tickets []*pb.Ticket) []string {
// ignorelist. If it partially fails for whatever reason (not all tickets will
// nessisarily be in the same call), only the matches which can be safely
// returned to the Synchronize calls are.
func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *sync.Map, cancel cancelErrFunc, m5c <-chan []string, m6c chan<- string) {
func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *sync.Map, cancel contextcause.CancelErrFunc, m5c <-chan []string, m6c chan<- string) {
totalMatches := 0
successfulMatches := 0
var lastErr error
@ -476,7 +486,7 @@ func (s *synchronizerService) addMatchesToIgnoreList(ctx context.Context, m *syn
func (s *synchronizerService) registrationInterval() time.Duration {
const (
name = "synchronizer.registrationIntervalMs"
name = "registrationInterval"
defaultInterval = time.Second
)
@ -489,7 +499,7 @@ func (s *synchronizerService) registrationInterval() time.Duration {
func (s *synchronizerService) proposalCollectionInterval() time.Duration {
const (
name = "synchronizer.proposalCollectionIntervalMs"
name = "proposalCollectionInterval"
defaultInterval = 10 * time.Second
)
@ -578,46 +588,3 @@ func bufferStringChannel(in chan string) chan []string {
}()
return out
}
///////////////////////////////////////
///////////////////////////////////////
// withCancelCause returns a copy of parent with a new Done channel. The
// returned context's Done channel is closed when the returned cancel function
// is called or when the parent context's Done channel is closed, whichever
// happens first. Unlike the conext package's WithCancel, the cancel func takes
// an error, and will return that error on subsequent calls to Err().
func withCancelCause(parent context.Context) (context.Context, cancelErrFunc) {
parent, cancel := context.WithCancel(parent)
ctx := &contextWithCancelCause{
Context: parent,
}
return ctx, func(err error) {
ctx.m.Lock()
defer ctx.m.Unlock()
if ctx.err == nil && parent.Err() == nil {
ctx.err = err
}
cancel()
}
}
type cancelErrFunc func(err error)
type contextWithCancelCause struct {
context.Context
m sync.Mutex
err error
}
func (ctx *contextWithCancelCause) Err() error {
ctx.m.Lock()
defer ctx.m.Unlock()
if ctx.err == nil {
return ctx.Context.Err()
}
return ctx.err
}

220
internal/appmain/appmain.go Normal file
View File

@ -0,0 +1,220 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package appmain contains the common application initialization code for Open Match servers.
package appmain
import (
"context"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"go.opencensus.io/stats/view"
"github.com/sirupsen/logrus"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/logging"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/telemetry"
)
var (
	// logger tags every log entry emitted by this package with the app and
	// component fields so entries can be filtered in aggregated logs.
	logger = logrus.WithFields(logrus.Fields{
		"app":       "openmatch",
		"component": "app.main",
	})
)
// RunApplication starts and runs the given application forever. For use in
// main functions to run the full application.
func RunApplication(serviceName string, bindService Bind) {
	// SIGTERM is signaled by k8s when it wants a pod to stop; SIGINT covers
	// interactive interrupts. Register before starting so no signal is missed.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)

	a, err := NewApplication(serviceName, bindService, func() (config.View, error) {
		return config.Read()
	}, net.Listen)
	if err != nil {
		logger.Fatal(err)
	}

	// Block until asked to shut down, then stop the application cleanly.
	<-signals
	if err := a.Stop(); err != nil {
		logger.Fatal(err)
	}
	logger.Info("Application stopped successfully.")
}
// Bind is a function which starts an application, and binds it to serving.
// It receives the application's Params and registers the service's handlers,
// health checks, metric views, and closers on the given Bindings.
type Bind func(p *Params, b *Bindings) error
// Params are inputs to starting an application.
type Params struct {
	config      config.View // configuration loaded by NewApplication's getCfg
	serviceName string      // binary name passed to RunApplication
}

// Config provides the configuration for the application.
func (p *Params) Config() config.View {
	return p.config
}

// ServiceName is a name for the currently running binary specified by
// RunApplication.
func (p *Params) ServiceName() string {
	return p.serviceName
}
// Bindings allows applications to bind various functions to the running servers.
type Bindings struct {
	sp *rpc.ServerParams // serving harness handlers and health checks attach to
	a  *App              // owning App; closers are accumulated on it
	// firstErr records the first error seen while binding (e.g. a failed view
	// registration); NewApplication surfaces it after bindService returns.
	firstErr error
}
// AddHealthCheckFunc allows an application to check if it is healthy, and
// contribute to the overall server health. Delegates to the underlying
// server params.
func (b *Bindings) AddHealthCheckFunc(f func(context.Context) error) {
	b.sp.AddHealthCheckFunc(f)
}
// RegisterViews begins collecting data for the given views. Registered views
// are automatically unregistered when the application stops. A registration
// failure is remembered and surfaced once the application finishes binding.
func (b *Bindings) RegisterViews(v ...*view.View) {
	err := view.Register(v...)
	if err != nil {
		// Keep only the first binding error; later ones are dropped.
		if b.firstErr == nil {
			b.firstErr = err
		}
		return
	}
	b.AddCloser(func() {
		view.Unregister(v...)
	})
}
// AddHandleFunc adds a protobuf service to the grpc server which is starting.
// Delegates to the underlying server params.
func (b *Bindings) AddHandleFunc(handlerFunc rpc.GrpcHandler, grpcProxyHandler rpc.GrpcProxyHandler) {
	b.sp.AddHandleFunc(handlerFunc, grpcProxyHandler)
}

// TelemetryHandle adds a handler to the mux for serving debug info and metrics.
func (b *Bindings) TelemetryHandle(pattern string, handler http.Handler) {
	b.sp.ServeMux.Handle(pattern, handler)
}

// TelemetryHandleFunc adds a handlerfunc to the mux for serving debug info and metrics.
func (b *Bindings) TelemetryHandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
	b.sp.ServeMux.HandleFunc(pattern, handler)
}
// AddCloser specifies a function to be called when the application is being
// stopped. Closers are called in reverse order.
func (b *Bindings) AddCloser(c func()) {
	// Adapt the error-less closer to the error-returning form App stores.
	wrapped := func() error {
		c()
		return nil
	}
	b.a.closers = append(b.a.closers, wrapped)
}
// AddCloserErr specifies a function to be called when the application is being
// stopped. Closers are called in reverse order. The first error returned by
// a closer is reported by App.Stop.
func (b *Bindings) AddCloserErr(c func() error) {
	b.a.closers = append(b.a.closers, c)
}
// App is used internally, and public only for apptest. Do not use, and use apptest instead.
type App struct {
	// closers run in reverse registration order when Stop is called.
	closers []func() error
}
// NewApplication is used internally, and public only for apptest. Do not use, and use apptest instead.
//
// It reads configuration, constructs the serving harness, runs the service's
// Bind function, and starts the server. If anything fails after the App is
// created, the partially-started App is stopped before the error is returned.
func NewApplication(serviceName string, bindService Bind, getCfg func() (config.View, error), listen func(network, address string) (net.Listener, error)) (*App, error) {
	a := &App{}

	cfg, err := getCfg()
	if err != nil {
		// NOTE(review): configuration/server construction errors exit the
		// process while later errors are returned to the caller; kept as-is
		// to preserve existing behavior — confirm whether returning the
		// error is preferable for in-memory tests.
		logger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatalf("cannot read configuration.")
	}
	logging.ConfigureLogging(cfg)

	sp, err := rpc.NewServerParamsFromConfig(cfg, "api."+serviceName, listen)
	if err != nil {
		logger.WithFields(logrus.Fields{
			"error": err.Error(),
		}).Fatalf("cannot construct server.")
	}

	p := &Params{
		config:      cfg,
		serviceName: serviceName,
	}
	b := &Bindings{
		a:  a,
		sp: sp,
	}

	// fail stops whatever has been started so far and returns the startup
	// error. Errors from Stop are deliberately suppressed: the original
	// startup error is the one worth reporting.
	fail := func(err error) (*App, error) {
		_ = a.Stop()
		return nil, err
	}

	if err := telemetry.Setup(p, b); err != nil {
		return fail(err)
	}
	if err := bindService(p, b); err != nil {
		return fail(err)
	}
	// Surface the first error recorded while binding (e.g. view registration).
	if b.firstErr != nil {
		return fail(b.firstErr)
	}

	s := &rpc.Server{}
	if err := s.Start(sp); err != nil {
		return fail(err)
	}
	b.AddCloserErr(s.Stop)

	return a, nil
}
// Stop is used internally, and public only for apptest. Do not use, and use apptest instead.
//
// Closers run in reverse registration order: dependencies are created before
// their dependants, so dependants shut down first. The first error returned
// by any closer is reported; the rest still run.
func (a *App) Stop() error {
	var firstErr error
	for i := range a.closers {
		closer := a.closers[len(a.closers)-1-i]
		if err := closer(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

View File

@ -0,0 +1,156 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package apptest allows testing of bound services in memory.
package apptest
import (
"net"
"testing"
"github.com/pkg/errors"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
)
// ServiceName is a constant used for all in memory tests.
const ServiceName = "test"
// TestApp starts an application for testing. It will automatically stop after
// the test completes, and immediately fail the test if there is an error
// starting. The caller must provide the listeners to use for the app, this
// way the listeners can use a random port, and set the proper values on the
// config.
func TestApp(t *testing.T, cfg config.View, listeners []net.Listener, binds ...appmain.Bind) {
	ls, err := newListenerStorage(listeners)
	if err != nil {
		t.Fatal(err)
	}

	app, err := appmain.NewApplication(ServiceName, bindAll(binds), func() (config.View, error) {
		return cfg, nil
	}, ls.listen)
	if err != nil {
		t.Fatal(err)
	}

	// Stop the app when the test finishes, failing the test on cleanup errors.
	t.Cleanup(func() {
		if stopErr := app.Stop(); stopErr != nil {
			t.Fatal(stopErr)
		}
	})
}
// RunInCluster allows for running services during an in cluster e2e test.
// This is NOT for running the actual code under test, but instead allow running
// auxiliary services the code under test might call. The returned function
// stops the started application.
func RunInCluster(binds ...appmain.Bind) (func() error, error) {
	app, err := appmain.NewApplication(ServiceName, bindAll(binds), func() (config.View, error) {
		return config.Read()
	}, net.Listen)
	if err != nil {
		return nil, err
	}
	return app.Stop, nil
}
// bindAll composes several Bind functions into one, running them in order and
// stopping at the first error.
func bindAll(binds []appmain.Bind) appmain.Bind {
	return func(p *appmain.Params, b *appmain.Bindings) error {
		for _, bind := range binds {
			if err := bind(p, b); err != nil {
				return err
			}
		}
		return nil
	}
}
// newFullAddr parses a "host:port" address into a normalized fullAddr.
func newFullAddr(network, address string) (fullAddr, error) {
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return fullAddr{}, err
	}
	// Usually listeners are started with an "unspecified" ip address, which
	// has several equivalent forms: ":80", "0.0.0.0:80", "[::]:80". Even if
	// the callers use the same form, the listeners may return a different
	// form when asked for its address. So detect and revert to the simpler
	// (empty host) form so map lookups match.
	if net.ParseIP(host).IsUnspecified() {
		host = ""
	}
	return fullAddr{
		network: network,
		host:    host,
		port:    port,
	}, nil
}

// fullAddr is a normalized listener address, usable as a map key.
type fullAddr struct {
	network string
	host    string
	port    string
}
// listenerStorage maps normalized addresses to the listeners reserved for them.
type listenerStorage struct {
	l map[fullAddr]net.Listener
}

// newListenerStorage indexes the given listeners by their normalized address.
func newListenerStorage(listeners []net.Listener) (*listenerStorage, error) {
	m := make(map[fullAddr]net.Listener, len(listeners))
	for _, lis := range listeners {
		addr := lis.Addr()
		key, err := newFullAddr(addr.Network(), addr.String())
		if err != nil {
			return nil, err
		}
		m[key] = lis
	}
	return &listenerStorage{l: m}, nil
}
// listen hands out the pre-registered listener for the given address. Each
// listener may be consumed only once.
func (ls *listenerStorage) listen(network, address string) (net.Listener, error) {
	key, err := newFullAddr(network, address)
	if err != nil {
		return nil, err
	}
	lis, ok := ls.l[key]
	if !ok {
		return nil, errors.Errorf("Listener for \"%s\" was not passed to TestApp or was already used", address)
	}
	delete(ls.l, key)
	return lis, nil
}
// GRPCClient creates a new client which connects to the specified service. It
// immediately fails the test if there is an error, and will also automatically
// close after the test completes.
func GRPCClient(t *testing.T, cfg config.View, service string) *grpc.ClientConn {
	cc, err := rpc.GRPCClientFromConfig(cfg, service)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if closeErr := cc.Close(); closeErr != nil {
			t.Fatal(closeErr)
		}
	})
	return cc
}

View File

@ -0,0 +1,62 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package contextcause
import (
"context"
"sync"
)
// WithCancelCause returns a copy of parent with a new Done channel. The
// returned context's Done channel is closed when the returned cancel function
// is called or when the parent context's Done channel is closed, whichever
// happens first. Unlike the conext package's WithCancel, the cancel func takes
// an error, and will return that error on subsequent calls to Err().
func WithCancelCause(parent context.Context) (context.Context, CancelErrFunc) {
parent, cancel := context.WithCancel(parent)
ctx := &contextWithCancelCause{
Context: parent,
}
return ctx, func(err error) {
ctx.m.Lock()
defer ctx.m.Unlock()
if ctx.err == nil && parent.Err() == nil {
ctx.err = err
}
cancel()
}
}
// CancelErrFunc cancels a context simular to context.CancelFunc. However it
// indicates why the context was canceled with the provided error.
type CancelErrFunc func(err error)
type contextWithCancelCause struct {
context.Context
m sync.Mutex
err error
}
func (ctx *contextWithCancelCause) Err() error {
ctx.m.Lock()
defer ctx.m.Unlock()
if ctx.err == nil {
return ctx.Context.Err()
}
return ctx.err
}

View File

@ -0,0 +1,64 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package contextcause
import (
"context"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// errExample is the sentinel cancellation cause used by the tests below.
var errExample = errors.New("errExample")

// TestCauseOverride verifies that the first cancellation cause is sticky:
// neither a later cancel call nor canceling the parent replaces it.
func TestCauseOverride(t *testing.T) {
	parentCtx, stopParent := context.WithCancel(context.Background())
	child, cancelChild := WithCancelCause(parentCtx)

	// Must not be done before any cancellation.
	select {
	case <-child.Done():
		t.FailNow()
	default:
	}

	cancelChild(errExample)
	<-child.Done()
	require.Equal(t, errExample, child.Err())

	// A second cancellation does not override the recorded cause.
	cancelChild(errors.New("second error"))
	require.Equal(t, errExample, child.Err())

	// Nor does canceling the parent afterwards.
	stopParent()
	require.Equal(t, errExample, child.Err())
}
// TestParentCanceledFirst verifies that when the parent is canceled before the
// cause-carrying cancel runs, Err() keeps reporting context.Canceled.
func TestParentCanceledFirst(t *testing.T) {
	parentCtx, stopParent := context.WithCancel(context.Background())
	child, cancelChild := WithCancelCause(parentCtx)

	// Must not be done before any cancellation.
	select {
	case <-child.Done():
		t.FailNow()
	default:
	}

	stopParent()
	<-child.Done()
	require.Equal(t, context.Canceled, child.Err())

	// A cause supplied after the parent already canceled is ignored.
	cancelChild(errExample)
	require.Equal(t, context.Canceled, child.Err())
}

View File

@ -24,7 +24,7 @@ import (
)
// Read sets default to a viper instance and read user config to override these defaults.
func Read() (View, error) {
func Read() (*viper.Viper, error) {
var err error
// read configs from config/default/matchmaker_config_default.yaml
// matchmaker_config_default provides default values for all of the possible tunnable parameters in Open Match

View File

@ -17,6 +17,7 @@ package rpc
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"testing"
@ -104,9 +105,8 @@ func runGrpcClientTests(t *testing.T, assert *assert.Assertions, cfg config.View
s := &Server{}
defer s.Stop()
waitForStart, err := s.Start(rpcParams)
err := s.Start(rpcParams)
assert.Nil(err)
waitForStart()
// Acquire grpc client
grpcConn, err := GRPCClientFromConfig(cfg, "test")
@ -129,9 +129,8 @@ func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *S
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
s := &Server{}
defer s.Stop()
waitForStart, err := s.Start(rpcParams)
err := s.Start(rpcParams)
assert.Nil(err)
waitForStart()
// Acquire http client
httpClient, baseURL, err := HTTPClientFromConfig(cfg, "test")
@ -160,15 +159,15 @@ func runHTTPClientTests(assert *assert.Assertions, cfg config.View, rpcParams *S
// Generate a config view and optional TLS key manifests (optional) for testing
func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool) (config.View, *ServerParams, func()) {
// Create netlisteners on random ports used for rpc serving
grpcLh := MustListen()
httpLh := MustListen()
rpcParams := NewServerParamsFromListeners(grpcLh, httpLh)
grpcL := MustListen()
httpL := MustListen()
rpcParams := NewServerParamsFromListeners(grpcL, httpL)
// Generate a config view with paths to the manifests
cfg := viper.New()
cfg.Set("test.hostname", "localhost")
cfg.Set("test.grpcport", grpcLh.Number())
cfg.Set("test.httpport", httpLh.Number())
cfg.Set("test.grpcport", MustGetPortNumber(grpcL))
cfg.Set("test.httpport", MustGetPortNumber(httpL))
// Create temporary TLS key files for testing
pubFile, err := ioutil.TempFile("", "pub*")
@ -177,8 +176,8 @@ func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool
if tlsEnabled {
// Generate public and private key bytes
pubBytes, priBytes, err := certgenTesting.CreateCertificateAndPrivateKeyForTesting([]string{
fmt.Sprintf("localhost:%d", grpcLh.Number()),
fmt.Sprintf("localhost:%d", httpLh.Number()),
fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL)),
fmt.Sprintf("localhost:%s", MustGetPortNumber(httpL)),
})
assert.Nil(err)
@ -195,6 +194,22 @@ func configureConfigAndKeysForTesting(assert *assert.Assertions, tlsEnabled bool
return cfg, rpcParams, func() { removeTempFile(assert, pubFile.Name()) }
}
// MustListen opens a TCP listener on a random available port, panicking if no
// port can be bound. Intended for tests.
func MustListen() net.Listener {
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		panic(err)
	}
	return listener
}

// MustGetPortNumber returns the port component of the listener's bound
// address, panicking if the address cannot be split.
func MustGetPortNumber(l net.Listener) string {
	_, port, err := net.SplitHostPort(l.Addr().String())
	if err != nil {
		panic(err)
	}
	return port
}
func removeTempFile(assert *assert.Assertions, paths ...string) {
for _, path := range paths {
err := os.Remove(path)

View File

@ -16,10 +16,8 @@ package rpc
import (
"context"
"net/http"
"sync"
"net"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/pkg/errors"
@ -28,40 +26,28 @@ import (
)
type insecureServer struct {
grpcLh *ListenerHolder
grpcListener net.Listener
grpcServer *grpc.Server
httpLh *ListenerHolder
httpListener net.Listener
httpMux *http.ServeMux
proxyMux *runtime.ServeMux
httpServer *http.Server
}
func (s *insecureServer) start(params *ServerParams) (func(), error) {
var serverStartWaiter sync.WaitGroup
func (s *insecureServer) start(params *ServerParams) error {
s.httpMux = params.ServeMux
s.proxyMux = runtime.NewServeMux()
// Configure the gRPC server.
grpcListener, err := s.grpcLh.Obtain()
if err != nil {
return func() {}, errors.WithStack(err)
}
s.grpcListener = grpcListener
s.grpcServer = grpc.NewServer(newGRPCServerOptions(params)...)
// Bind gRPC handlers
for _, handlerFunc := range params.handlersForGrpc {
handlerFunc(s.grpcServer)
}
serverStartWaiter.Add(1)
go func() {
serverStartWaiter.Done()
serverLogger.Infof("Serving gRPC: %s", s.grpcLh.AddrString())
serverLogger.Infof("Serving gRPC: %s", s.grpcListener.Addr().String())
gErr := s.grpcServer.Serve(s.grpcListener)
if gErr != nil {
return
@ -69,21 +55,15 @@ func (s *insecureServer) start(params *ServerParams) (func(), error) {
}()
// Configure the HTTP proxy server.
httpListener, err := s.httpLh.Obtain()
if err != nil {
return func() {}, errors.WithStack(err)
}
s.httpListener = httpListener
// Bind gRPC handlers
ctx, cancel := context.WithCancel(context.Background())
for _, handlerFunc := range params.handlersForGrpcProxy {
dialOpts := newGRPCDialOptions(params.enableMetrics, params.enableRPCLogging, params.enableRPCPayloadLogging)
dialOpts = append(dialOpts, grpc.WithInsecure())
if err = handlerFunc(ctx, s.proxyMux, grpcListener.Addr().String(), dialOpts); err != nil {
if err := handlerFunc(ctx, s.proxyMux, s.grpcListener.Addr().String(), dialOpts); err != nil {
cancel()
return func() {}, errors.WithStack(err)
return errors.WithStack(err)
}
}
@ -93,38 +73,28 @@ func (s *insecureServer) start(params *ServerParams) (func(), error) {
Addr: s.httpListener.Addr().String(),
Handler: instrumentHTTPHandler(s.httpMux, params),
}
serverStartWaiter.Add(1)
go func() {
serverStartWaiter.Done()
serverLogger.Infof("Serving HTTP: %s", s.httpLh.AddrString())
serverLogger.Infof("Serving HTTP: %s", s.httpListener.Addr().String())
hErr := s.httpServer.Serve(s.httpListener)
defer cancel()
if hErr != nil {
serverLogger.Debugf("error closing gRPC server: %s", hErr)
if hErr != nil && hErr != http.ErrServerClosed {
serverLogger.Debugf("error serving HTTP: %s", hErr)
}
}()
return serverStartWaiter.Wait, nil
return nil
}
func (s *insecureServer) stop() {
s.grpcServer.Stop()
if err := s.grpcListener.Close(); err != nil {
serverLogger.Debugf("error closing gRPC listener: %s", err)
}
if err := s.httpServer.Close(); err != nil {
serverLogger.Debugf("error closing HTTP server: %s", err)
}
if err := s.httpListener.Close(); err != nil {
serverLogger.Debugf("error closing HTTP listener: %s", err)
}
// stop gracefully shuts down the HTTP proxy server and the gRPC server.
// Shutdown and GracefulStop also close the servers' respective listeners, so
// the listeners are not closed explicitly here.
func (s *insecureServer) stop() error {
	err := s.httpServer.Shutdown(context.Background())
	s.grpcServer.GracefulStop()
	return err
}
func newInsecureServer(grpcLh *ListenerHolder, httpLh *ListenerHolder) *insecureServer {
func newInsecureServer(grpcL, httpL net.Listener) *insecureServer {
return &insecureServer{
grpcLh: grpcLh,
httpLh: httpLh,
grpcListener: grpcL,
httpListener: httpL,
}
}

View File

@ -29,25 +29,24 @@ import (
func TestInsecureStartStop(t *testing.T) {
assert := assert.New(t)
grpcLh := MustListen()
httpLh := MustListen()
grpcL := MustListen()
httpL := MustListen()
ff := &shellTesting.FakeFrontend{}
params := NewServerParamsFromListeners(grpcLh, httpLh)
params := NewServerParamsFromListeners(grpcL, httpL)
params.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterFrontendServiceServer(s, ff)
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
s := newInsecureServer(grpcLh, httpLh)
s := newInsecureServer(grpcL, httpL)
defer s.stop()
waitForStart, err := s.start(params)
err := s.start(params)
assert.Nil(err)
waitForStart()
conn, err := grpc.Dial(fmt.Sprintf(":%d", grpcLh.Number()), grpc.WithInsecure())
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
assert.Nil(err)
defer conn.Close()
endpoint := fmt.Sprintf("http://localhost:%d", httpLh.Number())
endpoint := fmt.Sprintf("http://localhost:%s", MustGetPortNumber(httpL))
httpClient := &http.Client{
Timeout: time.Second,
}

View File

@ -1,106 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpc
import (
"fmt"
"net"
"sync"
"github.com/pkg/errors"
)
// ListenerHolder holds an opened port that can only be handed off to 1 go routine.
type ListenerHolder struct {
	number   int          // port number the listener is bound to
	listener net.Listener // set to nil once Obtain() or Close() has consumed it
	addr     string       // listener address as reported by Addr().String()
	sync.RWMutex
}
// Obtain returns the TCP listener. This method can only be called once and is
// thread-safe: the holder's listener is cleared before being handed out, so a
// second call reports an error instead of sharing the listener.
func (lh *ListenerHolder) Obtain() (net.Listener, error) {
	lh.Lock()
	defer lh.Unlock()

	taken := lh.listener
	lh.listener = nil
	if taken == nil {
		return nil, errors.WithStack(fmt.Errorf("cannot Obtain() listener for %d because already handed off", lh.number))
	}
	return taken, nil
}
// Number returns the port number.
func (lh *ListenerHolder) Number() int {
	return lh.number
}

// AddrString returns the address of the serving port.
// Use this over fmt.Sprintf(":%d", lh.Number()) because the address is represented differently in
// systems that prefer IPv4 and IPv6.
func (lh *ListenerHolder) AddrString() string {
	return lh.addr
}
// Close shuts down the TCP listener if it has not already been handed off via
// Obtain() or closed. Safe to call multiple times; thread-safe.
func (lh *ListenerHolder) Close() error {
	lh.Lock()
	defer lh.Unlock()

	if lh.listener == nil {
		// Already obtained or closed: nothing to do.
		return nil
	}
	err := lh.listener.Close()
	lh.listener = nil
	return err
}
// newFromPortNumber opens a TCP listener based on the port number provided
// and wraps it in a ListenerHolder.
//
// Port 0 requests a random available port and should only be used in tests;
// in that mode the listener only accepts connections from localhost.
func newFromPortNumber(portNumber int) (*ListenerHolder, error) {
	addr := ""
	// port 0 actually means random port which should only be used in tests.
	if portNumber == 0 {
		// Only accept connections from localhost in test mode.
		addr = fmt.Sprintf("localhost:%d", portNumber)
	} else {
		addr = fmt.Sprintf(":%d", portNumber)
	}
	conn, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	tcpConn, ok := conn.Addr().(*net.TCPAddr)
	if !ok || tcpConn == nil {
		// Close the listener on this error path so the port is not leaked.
		// Best-effort: the type-assertion failure below is the relevant error.
		_ = conn.Close()
		return nil, fmt.Errorf("net.Listen(\"tcp\", %s) did not return a *net.TCPAddr", addr)
	}
	return &ListenerHolder{
		number:   tcpConn.Port,
		listener: conn,
		addr:     conn.Addr().String(),
	}, nil
}
// MustListen finds the next available port to open for TCP connections, used in tests to make them isolated.
// It panics if a listener cannot be created.
func MustListen() *ListenerHolder {
	// Port 0 in Go is a special port number to randomly choose an available port.
	// Reference, https://golang.org/pkg/net/#ListenTCP.
	lh, err := newFromPortNumber(0)
	if err != nil {
		panic(err)
	}
	return lh
}

View File

@ -1,104 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpc
import (
"fmt"
"strings"
"sync"
"sync/atomic"
"testing"
)
const (
numIterations = 1000
)
// TestAddrString verifies that AddrString() is consistent with the port's Addr().String() value.
func TestAddrString(t *testing.T) {
	lh, err := newFromPortNumber(0)
	if err != nil {
		t.Fatalf("newFromPortNumber(0) had error, %s", err)
	}
	if !strings.HasSuffix(lh.AddrString(), fmt.Sprintf(":%d", lh.Number())) {
		t.Errorf("%s does not have suffix ':%d'", lh.AddrString(), lh.Number())
	}
	port, err := lh.Obtain()
	// Check the error BEFORE registering the deferred Close: the original code
	// deferred first, which nil-dereferenced port if Obtain failed.
	if err != nil {
		t.Fatalf("error %s while calling lh.Obtain", err)
	}
	defer func() {
		err = port.Close()
		if err != nil {
			t.Errorf("error %s while calling port.Close()", err)
		}
	}()
	if port.Addr().String() != lh.AddrString() {
		t.Errorf("port.Addr().String() = %s should match lh.AddrString() = %s", port.Addr().String(), lh.AddrString())
	}
}
// TestObtain verifies that a ListenerHolder only returns Obtain() once, even
// when called from many goroutines concurrently: exactly one caller gets the
// listener, every other caller gets an error.
func TestObtain(t *testing.T) {
	var errCount uint64
	var obtainCount uint64
	lh, err := newFromPortNumber(0)
	if err != nil {
		t.Fatalf("newFromPortNumber(0) had error, %s", err)
	}
	var wg sync.WaitGroup
	for i := 0; i < numIterations; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			listener, err := lh.Obtain()
			if err != nil {
				atomic.AddUint64(&errCount, 1)
			}
			if listener != nil {
				atomic.AddUint64(&obtainCount, 1)
			}
			// Obtain must return exactly one of (listener, err). The original
			// message claimed "both nil" for the both-non-nil condition; both
			// invariants are now checked with matching messages.
			if err != nil && listener != nil {
				t.Error("err and listener were both non-nil.")
			}
			if err == nil && listener == nil {
				t.Error("err and listener were both nil.")
			}
		}()
	}
	wg.Wait()
	finalErrCount := atomic.LoadUint64(&errCount)
	finalObtainCount := atomic.LoadUint64(&obtainCount)
	if finalErrCount != numIterations-1 {
		t.Errorf("expected %d errors, got %d", numIterations-1, finalErrCount)
	}
	if finalObtainCount != 1 {
		t.Errorf("expected %d obtains, got %d", 1, finalObtainCount)
	}
}
// TestMustListen repeatedly opens listeners in parallel and checks that each
// is bound to a valid (positive) port number.
func TestMustListen(t *testing.T) {
	for i := 0; i < numIterations; i++ {
		t.Run(fmt.Sprintf("[%d] MustListen", i), func(t *testing.T) {
			t.Parallel()
			lh := MustListen()
			defer lh.Close()
			if lh.Number() <= 0 {
				t.Errorf("Expected %d > 0, port is out of range.", lh.Number())
			}
		})
	}
}

View File

@ -18,6 +18,7 @@ import (
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"time"
@ -37,7 +38,6 @@ import (
"google.golang.org/grpc/keepalive"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/logging"
"open-match.dev/open-match/internal/signal"
"open-match.dev/open-match/internal/telemetry"
)
@ -69,8 +69,8 @@ type ServerParams struct {
handlersForGrpcProxy []GrpcProxyHandler
handlersForHealthCheck []func(context.Context) error
grpcListener *ListenerHolder
grpcProxyListener *ListenerHolder
grpcListener net.Listener
grpcProxyListener net.Listener
// Root CA public certificate in PEM format.
rootCaPublicCertificateFileData []byte
@ -83,28 +83,22 @@ type ServerParams struct {
enableRPCLogging bool
enableRPCPayloadLogging bool
enableMetrics bool
closer func()
}
// NewServerParamsFromConfig returns server Params initialized from the configuration file.
func NewServerParamsFromConfig(cfg config.View, prefix string) (*ServerParams, error) {
grpcLh, err := newFromPortNumber(cfg.GetInt(prefix + ".grpcport"))
func NewServerParamsFromConfig(cfg config.View, prefix string, listen func(network, address string) (net.Listener, error)) (*ServerParams, error) {
grpcL, err := listen("tcp", fmt.Sprintf(":%d", cfg.GetInt(prefix+".grpcport")))
if err != nil {
serverLogger.Fatal(err)
return nil, err
return nil, errors.Wrap(err, "can't start listener for grpc")
}
httpLh, err := newFromPortNumber(cfg.GetInt(prefix + ".httpport"))
httpL, err := listen("tcp", fmt.Sprintf(":%d", cfg.GetInt(prefix+".httpport")))
if err != nil {
closeErr := grpcLh.Close()
if closeErr != nil {
serverLogger.WithFields(logrus.Fields{
"error": closeErr.Error(),
}).Info("failed to gRPC close port")
}
serverLogger.Fatal(err)
return nil, err
surpressedErr := grpcL.Close() // Don't care about additional errors when stopping.
_ = surpressedErr
return nil, errors.Wrap(err, "can't start listener for http")
}
p := NewServerParamsFromListeners(grpcLh, httpLh)
p := NewServerParamsFromListeners(grpcL, httpL)
certFile := cfg.GetString(configNameServerPublicCertificateFile)
privateKeyFile := cfg.GetString(configNameServerPrivateKeyFile)
@ -138,20 +132,18 @@ func NewServerParamsFromConfig(cfg config.View, prefix string) (*ServerParams, e
p.enableMetrics = cfg.GetBool(telemetry.ConfigNameEnableMetrics)
p.enableRPCLogging = cfg.GetBool(ConfigNameEnableRPCLogging)
p.enableRPCPayloadLogging = logging.IsDebugEnabled(cfg)
// TODO: This isn't ideal since telemetry requires config for it to be initialized.
// This forces us to initialize readiness probes earlier than necessary.
p.closer = telemetry.Setup(prefix, p.ServeMux, cfg)
return p, nil
}
// NewServerParamsFromListeners returns server Params initialized with the ListenerHolder variables.
func NewServerParamsFromListeners(grpcLh *ListenerHolder, proxyLh *ListenerHolder) *ServerParams {
func NewServerParamsFromListeners(grpcL net.Listener, proxyL net.Listener) *ServerParams {
return &ServerParams{
ServeMux: http.NewServeMux(),
handlersForGrpc: []GrpcHandler{},
handlersForGrpcProxy: []GrpcProxyHandler{},
grpcListener: grpcLh,
grpcProxyListener: proxyLh,
grpcListener: grpcL,
grpcProxyListener: proxyL,
}
}
@ -202,68 +194,27 @@ func (p *ServerParams) invalidate() {
// All HTTP traffic is served from a common http.ServeMux.
type Server struct {
serverWithProxy grpcServerWithProxy
closer func()
}
// grpcServerWithProxy this will go away when insecure.go and tls.go are merged into the same server.
type grpcServerWithProxy interface {
start(*ServerParams) (func(), error)
stop()
start(*ServerParams) error
stop() error
}
// Start the gRPC+HTTP(s) REST server.
func (s *Server) Start(p *ServerParams) (func(), error) {
func (s *Server) Start(p *ServerParams) error {
if p.usingTLS() {
s.serverWithProxy = newTLSServer(p.grpcListener, p.grpcProxyListener)
} else {
s.serverWithProxy = newInsecureServer(p.grpcListener, p.grpcProxyListener)
}
s.closer = p.closer
return s.serverWithProxy.start(p)
}
// Stop the gRPC+HTTP(s) REST server.
func (s *Server) Stop() {
s.serverWithProxy.stop()
if s.closer != nil {
s.closer()
}
}
// startServingIndefinitely creates a server based on the params and begins serving the gRPC and HTTP proxy.
// It returns waitUntilKilled() which will wait indefinitely until crash or Ctrl+C is pressed.
// forceStopServingFunc() is also returned which is used to force kill the server for tests.
func startServingIndefinitely(params *ServerParams) (func(), func(), error) {
s := &Server{}
// Start serving traffic.
waitForStart, err := s.Start(params)
if err != nil {
serverLogger.WithFields(logrus.Fields{
"error": err.Error(),
}).Fatal("Failed to start gRPC and HTTP servers.")
return func() {}, func() {}, err
}
serverLogger.Info("Server has started.")
// Exit when we see a signal
waitUntilKilled, forceStopServingFunc := signal.New()
waitForStart()
serveUntilKilledFunc := func() {
waitUntilKilled()
s.Stop()
serverLogger.Info("Shutting down server")
}
return serveUntilKilledFunc, forceStopServingFunc, nil
}
// MustServeForever is a convenience method for starting a server and running it indefinitely.
func MustServeForever(params *ServerParams) {
serveUntilKilledFunc, _, err := startServingIndefinitely(params)
if err != nil {
return
}
serveUntilKilledFunc()
func (s *Server) Stop() error {
return s.serverWithProxy.stop()
}
type loggingHTTPHandler struct {

View File

@ -32,25 +32,24 @@ import (
func TestStartStopServer(t *testing.T) {
assert := assert.New(t)
grpcLh := MustListen()
httpLh := MustListen()
grpcL := MustListen()
httpL := MustListen()
ff := &shellTesting.FakeFrontend{}
params := NewServerParamsFromListeners(grpcLh, httpLh)
params := NewServerParamsFromListeners(grpcL, httpL)
params.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterFrontendServiceServer(s, ff)
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
s := &Server{}
defer s.Stop()
waitForStart, err := s.Start(params)
assert.Nil(err)
waitForStart()
conn, err := grpc.Dial(fmt.Sprintf(":%d", grpcLh.Number()), grpc.WithInsecure())
err := s.Start(params)
assert.Nil(err)
endpoint := fmt.Sprintf("http://localhost:%d", httpLh.Number())
conn, err := grpc.Dial(fmt.Sprintf(":%s", MustGetPortNumber(grpcL)), grpc.WithInsecure())
assert.Nil(err)
endpoint := fmt.Sprintf("http://localhost:%s", MustGetPortNumber(httpL))
httpClient := &http.Client{
Timeout: time.Second,
}
@ -58,29 +57,6 @@ func TestStartStopServer(t *testing.T) {
runGrpcWithProxyTests(t, assert, s.serverWithProxy, conn, httpClient, endpoint)
}
func TestMustServeForever(t *testing.T) {
assert := assert.New(t)
grpcLh := MustListen()
httpLh := MustListen()
ff := &shellTesting.FakeFrontend{}
params := NewServerParamsFromListeners(grpcLh, httpLh)
params.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterFrontendServiceServer(s, ff)
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
serveUntilKilledFunc, stopServingFunc, err := startServingIndefinitely(params)
assert.Nil(err)
go func() {
// Wait for 500ms before killing the server.
// It really doesn't matter if it actually comes up.
// We just care that the server can respect an unexpected shutdown quickly after starting.
time.Sleep(time.Millisecond * 500)
stopServingFunc()
}()
serveUntilKilledFunc()
// This test will intentionally deadlock if the stop function is not respected.
}
func runGrpcWithProxyTests(t *testing.T, assert *assert.Assertions, s grpcServerWithProxy, conn *grpc.ClientConn, httpClient *http.Client, endpoint string) {
ctx := utilTesting.NewContext(t)
feClient := pb.NewFrontendServiceClient(conn)

View File

@ -1,187 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testing
import (
"context"
"fmt"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"testing"
"time"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/util"
certgenTesting "open-match.dev/open-match/tools/certgen/testing"
)
// MustServe creates a test server and returns TestContext that can be used to create clients.
// This method pseudorandomly selects insecure and TLS mode to ensure both paths work.
func MustServe(t *testing.T, binder func(*rpc.ServerParams)) *TestContext {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	if rng.Intn(2) == 1 {
		return MustServeTLS(t, binder)
	}
	return MustServeInsecure(t, binder)
}
// MustServeInsecure creates a test server without transport encryption and returns TestContext that can be used to create clients.
func MustServeInsecure(t *testing.T, binder func(*rpc.ServerParams)) *TestContext {
	grpcLh := rpc.MustListen()
	proxyLh := rpc.MustListen()
	params := rpc.NewServerParamsFromListeners(grpcLh, proxyLh)
	return &TestContext{
		t:            t,
		s:            bindAndStart(t, params, binder),
		grpcAddress:  fmt.Sprintf("localhost:%d", grpcLh.Number()),
		proxyAddress: fmt.Sprintf("localhost:%d", proxyLh.Number()),
		mc:           util.NewMultiClose(),
	}
}
// MustServeTLS creates a test server with TLS and returns TestContext that can be used to create clients.
func MustServeTLS(t *testing.T, binder func(*rpc.ServerParams)) *TestContext {
	grpcLh := rpc.MustListen()
	proxyLh := rpc.MustListen()
	grpcAddress := fmt.Sprintf("localhost:%d", grpcLh.Number())
	proxyAddress := fmt.Sprintf("localhost:%d", proxyLh.Number())

	// The certificate must cover both serving addresses.
	pub, priv, err := certgenTesting.CreateCertificateAndPrivateKeyForTesting([]string{grpcAddress, proxyAddress})
	if err != nil {
		t.Fatalf("cannot create certificates %v", err)
	}

	params := rpc.NewServerParamsFromListeners(grpcLh, proxyLh)
	params.SetTLSConfiguration(pub, pub, priv)
	return &TestContext{
		t:                  t,
		s:                  bindAndStart(t, params, binder),
		grpcAddress:        grpcAddress,
		proxyAddress:       proxyAddress,
		trustedCertificate: pub,
		mc:                 util.NewMultiClose(),
	}
}
// bindAndStart applies the binder to the server params, starts the server, and
// blocks until it is serving. It fails the test immediately on startup error.
func bindAndStart(t *testing.T, p *rpc.ServerParams, binder func(*rpc.ServerParams)) *rpc.Server {
	binder(p)
	s := &rpc.Server{}
	waitForStart, err := s.Start(p)
	if err != nil {
		t.Fatalf("failed to start server, %v", err)
	}
	waitForStart()
	return s
}
// TestContext provides methods to interact with the Open Match server.
type TestContext struct {
	t                  *testing.T
	s                  *rpc.Server
	grpcAddress        string
	proxyAddress       string
	trustedCertificate []byte // PEM certificate clients should trust; nil in insecure mode
	mc                 *util.MultiClose
}
// AddCloseFunc adds a close function to be run when the TestContext is closed.
func (tc *TestContext) AddCloseFunc(closer func()) {
	tc.mc.AddCloseFunc(closer)
}

// Close shuts down the server and frees the TCP port. Registered close
// functions run before the server is stopped.
func (tc *TestContext) Close() {
	tc.mc.Close()
	tc.s.Stop()
}

// Context returns a context appropriate for calling an RPC.
func (tc *TestContext) Context() context.Context {
	return context.Background()
}
// MustGRPC returns a grpc client configured to connect to an endpoint.
// It fails the test immediately if the client cannot be created.
func (tc *TestContext) MustGRPC() *grpc.ClientConn {
	conn, err := rpc.GRPCClientFromParams(tc.newClientParams(tc.grpcAddress))
	if err != nil {
		tc.t.Fatal(err)
	}
	return conn
}

// MustHTTP returns a HTTP(S) client configured to connect to an endpoint.
// It fails the test immediately if the client cannot be created.
func (tc *TestContext) MustHTTP() (*http.Client, string) {
	client, endpoint, err := rpc.HTTPClientFromParams(tc.newClientParams(tc.proxyAddress))
	if err != nil {
		tc.t.Fatal(err)
	}
	return client, endpoint
}

// newClientParams builds client params for the given address, trusting the
// server certificate when one was generated (TLS mode).
func (tc *TestContext) newClientParams(address string) *rpc.ClientParams {
	return &rpc.ClientParams{
		Address:                 address,
		TrustedCertificate:      tc.trustedCertificate,
		EnableRPCLogging:        true,
		EnableRPCPayloadLogging: true,
		EnableMetrics:           false,
	}
}
// GetHostname returns the hostname of the current test context.
func (tc *TestContext) GetHostname() string {
	return "localhost"
}

// GetHTTPPort returns the http proxy port of the current test context.
func (tc *TestContext) GetHTTPPort() int {
	_, port := hostnameAndPort(tc.t, tc.proxyAddress)
	return port
}

// GetGRPCPort returns the grpc service port of the current test context.
func (tc *TestContext) GetGRPCPort() int {
	_, port := hostnameAndPort(tc.t, tc.grpcAddress)
	return port
}
// hostnameAndPort splits an address into its hostname and numeric port. The
// address may omit a scheme ("localhost:1234"); the IPv6 wildcard "[::]" is
// normalized to "localhost". Fails the test if the address cannot be parsed.
func hostnameAndPort(t *testing.T, address string) (string, int) {
	// Coerce to a url so the standard library does the splitting.
	if !strings.Contains(address, "://") {
		address = "http://" + address
	}
	address = strings.Replace(address, "[::]", "localhost", -1)

	parsed, err := url.Parse(address)
	if err != nil {
		t.Fatalf("cannot parse address %s, %v", address, err)
	}
	portNumber, err := strconv.Atoi(parsed.Port())
	if err != nil {
		t.Fatalf("cannot convert port number %s, %v", parsed.Port(), err)
	}
	return parsed.Hostname(), portNumber
}

View File

@ -1,66 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testing
import (
"strings"
"testing"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/pkg/pb"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
shellTesting "open-match.dev/open-match/internal/testing"
)
// TestMustServe verifies that a server can stand up in (insecure or TLS) mode.
func TestMustServe(t *testing.T) {
	runMustServeTest(t, MustServe)
}

// TestMustServeInsecure verifies that a server can stand up in insecure mode.
func TestMustServeInsecure(t *testing.T) {
	runMustServeTest(t, MustServeInsecure)
}

// TestMustServeTLS verifies that a server can stand up in TLS mode.
func TestMustServeTLS(t *testing.T) {
	runMustServeTest(t, MustServeTLS)
}
// runMustServeTest stands up a frontend service using the given serve
// function, then exercises it over both gRPC and the HTTP proxy.
func runMustServeTest(t *testing.T, mustServeFunc func(*testing.T, func(*rpc.ServerParams)) *TestContext) {
	assert := assert.New(t)
	ff := &shellTesting.FakeFrontend{}
	tc := mustServeFunc(t, func(spf *rpc.ServerParams) {
		spf.AddHandleFunc(func(s *grpc.Server) {
			pb.RegisterFrontendServiceServer(s, ff)
		}, pb.RegisterFrontendServiceHandlerFromEndpoint)
	})
	defer tc.Close()

	// gRPC path.
	feClient := pb.NewFrontendServiceClient(tc.MustGRPC())
	resp, err := feClient.CreateTicket(tc.Context(), &pb.CreateTicketRequest{})
	assert.Nil(err)
	assert.NotNil(resp)

	// HTTP proxy path.
	hc, endpoint := tc.MustHTTP()
	hResp, err := hc.Post(endpoint+"/v1/frontendservice/tickets", "application/json", strings.NewReader("{}"))
	assert.Nil(err)
	if hResp != nil {
		assert.Equal(200, hResp.StatusCode)
	}
}

View File

@ -16,12 +16,10 @@ package rpc
import (
"context"
"fmt"
"net/http"
"sync"
"crypto/tls"
"fmt"
"net"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/pkg/errors"
@ -36,43 +34,37 @@ const (
)
type tlsServer struct {
grpcLh *ListenerHolder
grpcListener net.Listener
grpcServer *grpc.Server
httpLh *ListenerHolder
httpListener net.Listener
httpMux *http.ServeMux
proxyMux *runtime.ServeMux
httpServer *http.Server
}
func (s *tlsServer) start(params *ServerParams) (func(), error) {
var serverStartWaiter sync.WaitGroup
func (s *tlsServer) start(params *ServerParams) error {
s.httpMux = params.ServeMux
s.proxyMux = runtime.NewServeMux()
grpcAddress := fmt.Sprintf("localhost:%d", s.grpcLh.Number())
grpcListener, err := s.grpcLh.Obtain()
_, grpcPort, err := net.SplitHostPort(s.grpcListener.Addr().String())
if err != nil {
return func() {}, errors.WithStack(err)
return err
}
s.grpcListener = grpcListener
grpcAddress := fmt.Sprintf("localhost:%s", grpcPort)
rootCaCert, err := trustedCertificateFromFileData(params.rootCaPublicCertificateFileData)
if err != nil {
return func() {}, errors.WithStack(err)
return errors.WithStack(err)
}
certPoolForGrpcEndpoint, err := trustedCertificateFromFileData(params.publicCertificateFileData)
if err != nil {
return func() {}, errors.WithStack(err)
return errors.WithStack(err)
}
grpcTLSCertificate, err := certificateFromFileData(params.publicCertificateFileData, params.privateKeyFileData)
if err != nil {
return func() {}, errors.WithStack(err)
return errors.WithStack(err)
}
creds := credentials.NewServerTLSFromCert(grpcTLSCertificate)
serverOpts := newGRPCServerOptions(params)
@ -84,10 +76,8 @@ func (s *tlsServer) start(params *ServerParams) (func(), error) {
handlerFunc(s.grpcServer)
}
serverStartWaiter.Add(1)
go func() {
serverStartWaiter.Done()
serverLogger.Infof("Serving gRPC-TLS: %s", s.grpcLh.AddrString())
serverLogger.Infof("Serving gRPC-TLS: %s", s.grpcListener.Addr().String())
gErr := s.grpcServer.Serve(s.grpcListener)
if gErr != nil {
serverLogger.Debugf("error closing gRPC-TLS server: %s", gErr)
@ -95,11 +85,6 @@ func (s *tlsServer) start(params *ServerParams) (func(), error) {
}()
// Start HTTP server
httpListener, err := s.httpLh.Obtain()
if err != nil {
return func() {}, errors.WithStack(err)
}
s.httpListener = httpListener
// Bind gRPC handlers
ctx, cancel := context.WithCancel(context.Background())
@ -109,7 +94,7 @@ func (s *tlsServer) start(params *ServerParams) (func(), error) {
for _, handlerFunc := range params.handlersForGrpcProxy {
if err = handlerFunc(ctx, s.proxyMux, grpcAddress, httpsToGrpcProxyOptions); err != nil {
cancel()
return func() {}, errors.WithStack(err)
return errors.WithStack(err)
}
}
@ -127,40 +112,29 @@ func (s *tlsServer) start(params *ServerParams) (func(), error) {
NextProtos: []string{http2WithTLSVersionID}, // https://github.com/grpc-ecosystem/grpc-gateway/issues/220
},
}
serverStartWaiter.Add(1)
go func() {
serverStartWaiter.Done()
tlsListener := tls.NewListener(s.httpListener, s.httpServer.TLSConfig)
serverLogger.Infof("Serving HTTPS: %s", s.httpLh.AddrString())
serverLogger.Infof("Serving HTTPS: %s", s.httpListener.Addr().String())
hErr := s.httpServer.Serve(tlsListener)
defer cancel()
if hErr != nil {
serverLogger.Debugf("error closing server: %s", hErr)
if hErr != nil && hErr != http.ErrServerClosed {
serverLogger.Debugf("error serving HTTP: %s", hErr)
}
}()
// Wait for the servers to come up.
return serverStartWaiter.Wait, nil
return nil
}
func (s *tlsServer) stop() {
s.grpcServer.Stop()
if err := s.grpcListener.Close(); err != nil {
serverLogger.Debugf("error closing gRPC-TLS listener: %s", err)
}
if err := s.httpServer.Close(); err != nil {
serverLogger.Debugf("error closing HTTPS server: %s", err)
}
if err := s.httpListener.Close(); err != nil {
serverLogger.Debugf("error closing HTTPS listener: %s", err)
}
func (s *tlsServer) stop() error {
// the servers also close their respective listeners.
err := s.httpServer.Shutdown(context.Background())
s.grpcServer.GracefulStop()
return err
}
func newTLSServer(grpcLh *ListenerHolder, httpLh *ListenerHolder) *tlsServer {
func newTLSServer(grpcL, httpL net.Listener) *tlsServer {
return &tlsServer{
grpcLh: grpcLh,
httpLh: httpLh,
grpcListener: grpcL,
httpListener: httpL,
}
}

View File

@ -17,6 +17,7 @@ package rpc
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"testing"
"time"
@ -32,10 +33,10 @@ import (
// TestStartStopTlsServerWithCARootedCertificate verifies that we can have a gRPC+TLS+HTTPS server/client work with a single self-signed certificate.
func TestStartStopTlsServerWithSingleCertificate(t *testing.T) {
assert := assert.New(t)
grpcLh := MustListen()
proxyLh := MustListen()
grpcAddress := fmt.Sprintf("localhost:%d", grpcLh.Number())
proxyAddress := fmt.Sprintf("localhost:%d", proxyLh.Number())
grpcL := MustListen()
proxyL := MustListen()
grpcAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL))
proxyAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(proxyL))
allHostnames := []string{grpcAddress, proxyAddress}
pub, priv, err := certgenTesting.CreateCertificateAndPrivateKeyForTesting(allHostnames)
assert.Nil(err)
@ -44,8 +45,8 @@ func TestStartStopTlsServerWithSingleCertificate(t *testing.T) {
rootPrivateKeyFileData: priv,
publicCertificateFileData: pub,
privateKeyFileData: priv,
grpcLh: grpcLh,
proxyLh: proxyLh,
grpcL: grpcL,
proxyL: proxyL,
grpcAddress: grpcAddress,
proxyAddress: proxyAddress,
})
@ -54,10 +55,10 @@ func TestStartStopTlsServerWithSingleCertificate(t *testing.T) {
// TestStartStopTlsServerWithCARootedCertificate verifies that we can have a gRPC+TLS+HTTPS server/client work with a self-signed CA-rooted certificate.
func TestStartStopTlsServerWithCARootedCertificate(t *testing.T) {
assert := assert.New(t)
grpcLh := MustListen()
proxyLh := MustListen()
grpcAddress := fmt.Sprintf("localhost:%d", grpcLh.Number())
proxyAddress := fmt.Sprintf("localhost:%d", proxyLh.Number())
grpcL := MustListen()
proxyL := MustListen()
grpcAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(grpcL))
proxyAddress := fmt.Sprintf("localhost:%s", MustGetPortNumber(proxyL))
allHostnames := []string{grpcAddress, proxyAddress}
rootPub, rootPriv, err := certgenTesting.CreateRootCertificateAndPrivateKeyForTesting(allHostnames)
assert.Nil(err)
@ -70,8 +71,8 @@ func TestStartStopTlsServerWithCARootedCertificate(t *testing.T) {
rootPrivateKeyFileData: rootPriv,
publicCertificateFileData: pub,
privateKeyFileData: priv,
grpcLh: grpcLh,
proxyLh: proxyLh,
grpcL: grpcL,
proxyL: proxyL,
grpcAddress: grpcAddress,
proxyAddress: proxyAddress,
})
@ -82,8 +83,8 @@ type tlsServerTestParams struct {
rootPrivateKeyFileData []byte
publicCertificateFileData []byte
privateKeyFileData []byte
grpcLh *ListenerHolder
proxyLh *ListenerHolder
grpcL net.Listener
proxyL net.Listener
grpcAddress string
proxyAddress string
}
@ -93,7 +94,7 @@ func runTestStartStopTLSServer(t *testing.T, tp *tlsServerTestParams) {
ff := &shellTesting.FakeFrontend{}
serverParams := NewServerParamsFromListeners(tp.grpcLh, tp.proxyLh)
serverParams := NewServerParamsFromListeners(tp.grpcL, tp.proxyL)
serverParams.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterFrontendServiceServer(s, ff)
}, pb.RegisterFrontendServiceHandlerFromEndpoint)
@ -102,9 +103,8 @@ func runTestStartStopTLSServer(t *testing.T, tp *tlsServerTestParams) {
s := newTLSServer(serverParams.grpcListener, serverParams.grpcProxyListener)
defer s.stop()
waitForStart, err := s.start(serverParams)
err := s.start(serverParams)
assert.Nil(err)
waitForStart()
pool, err := trustedCertificateFromFileData(tp.rootPublicCertificateFileData)
assert.Nil(err)

View File

@ -1,38 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package signal handles terminating applications on Ctrl+Break.
package signal
import (
"os"
"os/signal"
)
// New waits for a manual termination or a user initiated termination IE: Ctrl+Break.
// waitForFunc() will wait indefinitely for a signal.
// terminateFunc() will trigger waitForFunc() to complete immediately.
func New() (waitForFunc func(), terminateFunc func()) {
// Exit when we see a signal
terminate := make(chan os.Signal, 1)
signal.Notify(terminate, os.Interrupt)
waitForFunc = func() {
<-terminate
}
terminateFunc = func() {
terminate <- os.Interrupt
close(terminate)
}
return waitForFunc, terminateFunc
}

View File

@ -1,89 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package signal
import (
"testing"
"time"
)
const (
	// defaultTimeout bounds how long these tests wait for a signal before
	// concluding that it is not going to arrive.
	defaultTimeout = time.Duration(100) * time.Millisecond
)
// TestOneSignal checks that terminating a single signal pair releases its waiter.
func TestOneSignal(t *testing.T) {
	waitFor, stop := New()
	verifyWait(t, waitFor, stop)
}
// TestTwoSignals verifies that two independently created signal pairs do not
// interfere: terminating one must not release the other's waiter, and both
// waiters stay released once their own terminate has been called.
func TestTwoSignals(t *testing.T) {
	wait, terminate := New()
	wait2, terminate2 := New()
	// Neither waiter may complete before its terminate fires.
	if waitWithTimeout(wait)() {
		t.Error("wait should have timed out because terminate() was not called.")
	}
	if waitWithTimeout(wait2)() {
		t.Error("wait2 should have timed out because terminate2() was not called.")
	}
	terminate()
	if !waitWithTimeout(wait)() {
		t.Error("wait should have completed because terminate() was called.")
	}
	if waitWithTimeout(wait2)() {
		t.Error("wait2 should have timed out because terminate2() was not called.")
	}
	terminate2()
	if !waitWithTimeout(wait)() {
		t.Error("wait should have completed because terminate() was called prior.")
	}
	if !waitWithTimeout(wait2)() {
		// Fixed message: this branch fires when wait2 FAILED to complete,
		// not when it timed out as intended.
		t.Error("wait2 should have completed because terminate2() was called.")
	}
}
// waitWithTimeout runs wait on its own goroutine and returns a function that
// reports whether wait finished before defaultTimeout elapsed.
func waitWithTimeout(wait func()) func() bool {
	done := make(chan struct{})
	go func() {
		wait()
		done <- struct{}{}
	}()
	return func() bool {
		select {
		case <-done:
			return true
		case <-time.After(defaultTimeout):
			return false
		}
	}
}
// verifyWait asserts that calling terminate causes wait to return within
// defaultTimeout.
func verifyWait(t *testing.T, wait func(), terminate func()) {
	waiter := make(chan struct{})
	go func() {
		defer func() { waiter <- struct{}{} }()
		wait()
	}()
	terminate()
	select {
	case <-waiter:
		// Success: wait returned after terminate.
	case <-time.After(defaultTimeout):
		// Fixed message: the old text claimed "WaitGroup ... within 1 second",
		// but no WaitGroup is involved and the timeout is defaultTimeout.
		t.Errorf("wait did not complete within %v of terminate().", defaultTimeout)
	}
}

View File

@ -18,127 +18,91 @@ import (
"context"
"go.opencensus.io/trace"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
var (
mStateStoreCreateTicketCount = telemetry.Counter("statestore/createticketcount", "number of tickets created")
mStateStoreGetTicketCount = telemetry.Counter("statestore/getticketcount", "number of tickets retrieved")
mStateStoreDeleteTicketCount = telemetry.Counter("statestore/deleteticketcount", "number of tickets deleted")
mStateStoreIndexTicketCount = telemetry.Counter("statestore/indexticketcount", "number of tickets indexed")
mStateStoreDeindexTicketCount = telemetry.Counter("statestore/deindexticketcount", "number of tickets deindexed")
mStateStoreGetTicketsCount = telemetry.Counter("statestore/getticketscount", "number of bulk ticket retrievals")
mStateStoreGetIndexedIDSetCount = telemetry.Counter("statestore/getindexedidsetcount", "number of bulk indexed id retrievals")
mStateStoreUpdateAssignmentsCount = telemetry.Counter("statestore/updateassignmentcount", "number of tickets assigned")
mStateStoreGetAssignmentsCount = telemetry.Counter("statestore/getassignmentscount", "number of ticket assigned retrieved")
mStateStoreAddTicketsToIgnoreListCount = telemetry.Counter("statestore/addticketstoignorelistcount", "number of tickets moved to ignore list")
mStateStoreDeleteTicketFromIgnoreListCount = telemetry.Counter("statestore/deleteticketfromignorelistcount", "number of tickets removed from ignore list")
)
// instrumentedService is a wrapper for a statestore service that provides instrumentation (metrics and tracing) of the database.
type instrumentedService struct {
	// s is the wrapped statestore implementation; every method below
	// delegates to it, wrapping the call in a trace span and/or a metric.
	s Service
}
// Close the connection to the database.
func (is *instrumentedService) Close() error {
	// Plain delegation: no span or metric is recorded for shutdown.
	return is.s.Close()
}
// HealthCheck indicates if the database is reachable.
func (is *instrumentedService) HealthCheck(ctx context.Context) error {
	// Delegate directly; the intermediate err variable added nothing.
	return is.s.HealthCheck(ctx)
}
// CreateTicket creates a new Ticket in the state storage. If the id already exists, it will be overwritten.
func (is *instrumentedService) CreateTicket(ctx context.Context, ticket *pb.Ticket) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.CreateTicket")
	defer span.End()
	// Deferred so the counter is bumped when the wrapped call returns,
	// whether or not it succeeded.
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreCreateTicketCount)
	return is.s.CreateTicket(ctx, ticket)
}
// GetTicket gets the Ticket with the specified id from state storage. This method fails if the Ticket does not exist.
func (is *instrumentedService) GetTicket(ctx context.Context, id string) (*pb.Ticket, error) {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetTicket")
	defer span.End()
	// Counter is recorded on return, regardless of success.
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreGetTicketCount)
	return is.s.GetTicket(ctx, id)
}
// DeleteTicket removes the Ticket with the specified id from state storage.
func (is *instrumentedService) DeleteTicket(ctx context.Context, id string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeleteTicket")
	defer span.End()
	// Counter is recorded on return, regardless of success.
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreDeleteTicketCount)
	return is.s.DeleteTicket(ctx, id)
}
// IndexTicket indexes the Ticket id for the configured index fields.
func (is *instrumentedService) IndexTicket(ctx context.Context, ticket *pb.Ticket) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.IndexTicket")
	defer span.End()
	// Counter is recorded on return, regardless of success.
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreIndexTicketCount)
	return is.s.IndexTicket(ctx, ticket)
}
// DeindexTicket removes the indexing for the specified Ticket. Only the indexes are removed but the Ticket continues to exist.
func (is *instrumentedService) DeindexTicket(ctx context.Context, id string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeindexTicket")
	defer span.End()
	// Counter is recorded on return, regardless of success.
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreDeindexTicketCount)
	return is.s.DeindexTicket(ctx, id)
}
// GetTickets returns multiple tickets from storage. Missing tickets are
// silently ignored.
func (is *instrumentedService) GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error) {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetTickets")
	defer span.End()
	// One unit per bulk retrieval (not per ticket).
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreGetTicketsCount)
	return is.s.GetTickets(ctx, ids)
}
// GetIndexedIds returns the ids of all tickets currently indexed.
func (is *instrumentedService) GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error) {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetIndexedIDSet")
	defer span.End()
	// One unit per bulk id retrieval.
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreGetIndexedIDSetCount)
	return is.s.GetIndexedIDSet(ctx)
}
// UpdateAssignments update using the request's specified tickets with assignments.
func (is *instrumentedService) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.UpdateAssignments")
	defer span.End()
	// One unit per update request (not per ticket assigned).
	defer telemetry.RecordUnitMeasurement(ctx, mStateStoreUpdateAssignmentsCount)
	return is.s.UpdateAssignments(ctx, req)
}
// GetAssignments returns the assignment associated with the input ticket id
func (is *instrumentedService) GetAssignments(ctx context.Context, id string, callback func(*pb.Assignment) error) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetAssignments")
	defer span.End()
	// Wrap the callback so a unit is counted each time an assignment is
	// delivered to the caller.
	return is.s.GetAssignments(ctx, id, func(a *pb.Assignment) error {
		defer telemetry.RecordUnitMeasurement(ctx, mStateStoreGetAssignmentsCount)
		return callback(a)
	})
	// NOTE(review): the line below is unreachable (the function returns
	// above) and looks like a leftover from a refactor that replaced the
	// instrumented callback with plain delegation — confirm and delete one
	// of the two returns.
	return is.s.GetAssignments(ctx, id, callback)
}
// AddTicketsToIgnoreList appends new proposed tickets to the proposed sorted set with current timestamp
func (is *instrumentedService) AddTicketsToIgnoreList(ctx context.Context, ids []string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.AddTicketsToIgnoreList")
	defer span.End()
	// Records one unit per ticket id in the batch, on return.
	defer telemetry.RecordNUnitMeasurement(ctx, mStateStoreAddTicketsToIgnoreListCount, int64(len(ids)))
	return is.s.AddTicketsToIgnoreList(ctx, ids)
}
// DeleteTicketsFromIgnoreList deletes tickets from the proposed sorted set
func (is *instrumentedService) DeleteTicketsFromIgnoreList(ctx context.Context, ids []string) error {
	ctx, span := trace.StartSpan(ctx, "statestore/instrumented.DeleteTicketsFromIgnoreList")
	defer span.End()
	// Records one unit per ticket id in the batch, on return.
	defer telemetry.RecordNUnitMeasurement(ctx, mStateStoreDeleteTicketFromIgnoreListCount, int64(len(ids)))
	return is.s.DeleteTicketsFromIgnoreList(ctx, ids)
}
// ReleaseAllTickets opens a trace span and forwards the bulk-release call to
// the wrapped statestore service.
func (is *instrumentedService) ReleaseAllTickets(ctx context.Context) error {
	spanCtx, span := trace.StartSpan(ctx, "statestore/instrumented.ReleaseAllTickets")
	defer span.End()
	return is.s.ReleaseAllTickets(spanCtx)
}

View File

@ -61,6 +61,9 @@ type Service interface {
// DeleteTicketsFromIgnoreList deletes tickets from the proposed sorted set
DeleteTicketsFromIgnoreList(ctx context.Context, ids []string) error
// ReleaseAllTickets releases all pending tickets back to active
ReleaseAllTickets(ctx context.Context) error
// Closes the connection to the underlying storage.
Close() error
}

View File

@ -23,11 +23,11 @@ import (
"github.com/cenkalti/backoff"
"github.com/golang/protobuf/proto"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
@ -38,9 +38,6 @@ var (
"app": "openmatch",
"component": "statestore.redis",
})
mRedisConnLatencyMs = telemetry.HistogramWithBounds("redis/connectlatency", "latency to get a redis connection", "ms", telemetry.HistogramBounds)
mRedisConnPoolActive = telemetry.Gauge("redis/connectactivecount", "number of connections in the pool, includes idle plus connections in use")
mRedisConnPoolIdle = telemetry.Gauge("redis/connectidlecount", "number of idle connections in the pool")
)
type redisBackend struct {
@ -58,7 +55,7 @@ func (rb *redisBackend) Close() error {
func newRedis(cfg config.View) Service {
return &redisBackend{
healthCheckPool: getHealthCheckPool(cfg),
redisPool: getRedisPool(cfg),
redisPool: GetRedisPool(cfg),
cfg: cfg,
}
}
@ -92,7 +89,8 @@ func getHealthCheckPool(cfg config.View) *redis.Pool {
}
}
func getRedisPool(cfg config.View) *redis.Pool {
// GetRedisPool configures a new pool to connect to redis given the config.
func GetRedisPool(cfg config.View) *redis.Pool {
var dialFunc func(context.Context) (redis.Conn, error)
maxIdle := cfg.GetInt("redis.pool.maxIdle")
maxActive := cfg.GetInt("redis.pool.maxActive")
@ -176,16 +174,11 @@ func (rb *redisBackend) HealthCheck(ctx context.Context) error {
}
defer handleConnectionClose(&redisConn)
poolStats := rb.redisPool.Stats()
telemetry.SetGauge(ctx, mRedisConnPoolActive, int64(poolStats.ActiveCount))
telemetry.SetGauge(ctx, mRedisConnPoolIdle, int64(poolStats.IdleCount))
_, err = redisConn.Do("PING")
// Encountered an issue getting a connection from the pool.
if err != nil {
return status.Errorf(codes.Unavailable, "%v", err)
}
return nil
}
@ -228,7 +221,6 @@ func redisURLFromAddr(addr string, cfg config.View, usePassword bool) string {
}
func (rb *redisBackend) connect(ctx context.Context) (redis.Conn, error) {
startTime := time.Now()
redisConn, err := rb.redisPool.GetContext(ctx)
if err != nil {
redisLogger.WithFields(logrus.Fields{
@ -236,8 +228,6 @@ func (rb *redisBackend) connect(ctx context.Context) (redis.Conn, error) {
}).Error("failed to connect to redis")
return nil, status.Errorf(codes.Unavailable, "%v", err)
}
telemetry.RecordNUnitMeasurement(ctx, mRedisConnLatencyMs, time.Since(startTime).Milliseconds())
return redisConn, nil
}
@ -395,13 +385,13 @@ func (rb *redisBackend) GetIndexedIDSet(ctx context.Context) (map[string]struct{
}
defer handleConnectionClose(&redisConn)
ttl := rb.cfg.GetDuration("storage.ignoreListTTL")
ttl := rb.cfg.GetDuration("pendingReleaseTimeout")
curTime := time.Now()
curTimeInt := curTime.UnixNano()
endTimeInt := curTime.Add(time.Hour).UnixNano()
startTimeInt := curTime.Add(-ttl).UnixNano()
// Filter out tickets that are fetched but not assigned within ttl time (ms).
idsInIgnoreLists, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", "proposed_ticket_ids", startTimeInt, curTimeInt))
idsInIgnoreLists, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", "proposed_ticket_ids", startTimeInt, endTimeInt))
if err != nil {
redisLogger.WithError(err).Error("failed to get proposed tickets")
return nil, status.Errorf(codes.Internal, "error getting ignore list %v", err)
@ -529,8 +519,12 @@ func (rb *redisBackend) UpdateAssignments(ctx context.Context, req *pb.AssignTic
tickets = append(tickets, t)
}
}
assignmentTimeout := rb.cfg.GetDuration("assignedDeleteTimeout") / time.Millisecond
err = redisConn.Send("MULTI")
if err != nil {
return nil, errors.Wrap(err, "error starting redis multi")
}
cmds := make([]interface{}, 0, 2*len(tickets))
for _, ticket := range tickets {
ticket.Assignment = idToA[ticket.Id]
@ -540,13 +534,36 @@ func (rb *redisBackend) UpdateAssignments(ctx context.Context, req *pb.AssignTic
return nil, status.Errorf(codes.Internal, "failed to marshal ticket %s", ticket.GetId())
}
cmds = append(cmds, ticket.GetId(), ticketByte)
err = redisConn.Send("SET", ticket.Id, ticketByte, "PX", int64(assignmentTimeout), "XX")
if err != nil {
return nil, errors.Wrap(err, "error sending ticket assignment set")
}
}
_, err = redisConn.Do("MSET", cmds...)
wasSet, err := redis.Values(redisConn.Do("EXEC"))
if err != nil {
redisLogger.WithError(err).Errorf("failed to send ticket updates to redis %s", cmds)
return nil, err
return nil, errors.Wrap(err, "error executing assignment set")
}
if len(wasSet) != len(tickets) {
return nil, status.Errorf(codes.Internal, "sent %d tickets to redis, but received %d back", len(tickets), len(wasSet))
}
for i, ticket := range tickets {
v, err := redis.String(wasSet[i], nil)
if err == redis.ErrNil {
resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
TicketId: ticket.Id,
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
})
continue
}
if err != nil {
return nil, errors.Wrap(err, "unexpected error from redis multi set")
}
if v != "OK" {
return nil, status.Errorf(codes.Internal, "unexpected response from redis: %s", v)
}
}
return resp, nil
@ -638,6 +655,17 @@ func (rb *redisBackend) DeleteTicketsFromIgnoreList(ctx context.Context, ids []s
return nil
}
// ReleaseAllTickets deletes the "proposed_ticket_ids" sorted set from redis,
// so that no ticket remains marked as proposed.
func (rb *redisBackend) ReleaseAllTickets(ctx context.Context) error {
	conn, err := rb.connect(ctx)
	if err != nil {
		return err
	}
	defer handleConnectionClose(&conn)
	if _, err = conn.Do("DEL", "proposed_ticket_ids"); err != nil {
		return err
	}
	return nil
}
func handleConnectionClose(conn *redis.Conn) {
err := (*conn).Close()
if err != nil {

View File

@ -140,7 +140,7 @@ func TestIgnoreLists(t *testing.T) {
verifyTickets(service, len(tickets)-3)
// Sleep until the ignore list expired and verify we still have all the tickets
time.Sleep(cfg.GetDuration("storage.ignoreListTTL"))
time.Sleep(cfg.GetDuration("pendingReleaseTimeout"))
verifyTickets(service, len(tickets))
}
@ -288,7 +288,7 @@ func createRedis(t *testing.T, withSentinel bool, withPassword string) (config.V
cfg.Set("redis.pool.idleTimeout", time.Second)
cfg.Set("redis.pool.healthCheckTimeout", 100*time.Millisecond)
cfg.Set("redis.pool.maxActive", 5)
cfg.Set("storage.ignoreListTTL", "200ms")
cfg.Set("pendingReleaseTimeout", "200ms")
cfg.Set("backoff.initialInterval", 100*time.Millisecond)
cfg.Set("backoff.randFactor", 0.5)
cfg.Set("backoff.multiplier", 0.5)

View File

@ -25,8 +25,8 @@ const (
PoolIdleTimeout = 10 * time.Second
// PoolHealthCheckTimeout is the read/write timeout of a healthcheck HTTP request
PoolHealthCheckTimeout = 100 * time.Millisecond
// IgnoreListTTL is the time to live duration of Open Match ignore list settings
IgnoreListTTL = 500 * time.Millisecond
// pendingReleaseTimeout is the time to live duration of Open Match ignore list settings
pendingReleaseTimeout = 500 * time.Millisecond
// InitialInterval is the initial backoff time of a backoff strategy
InitialInterval = 30 * time.Millisecond
// RandFactor is the randomization factor of a backoff strategy
@ -36,5 +36,6 @@ const (
// MaxInterval is the maximum retry interval of a backoff strategy
MaxInterval = 300 * time.Millisecond
// MaxElapsedTime is the maximum total retry time of a backoff strategy
MaxElapsedTime = 1000 * time.Millisecond
MaxElapsedTime = 1000 * time.Millisecond
assignedDeleteTimeout = 200 * time.Millisecond
)

View File

@ -45,7 +45,8 @@ func New(t *testing.T, cfg config.Mutable) func() {
cfg.Set("redis.pool.maxActive", PoolMaxActive)
cfg.Set("redis.pool.idleTimeout", PoolIdleTimeout)
cfg.Set("redis.pool.healthCheckTimeout", PoolHealthCheckTimeout)
cfg.Set("storage.ignoreListTTL", IgnoreListTTL)
cfg.Set("pendingReleaseTimeout", pendingReleaseTimeout)
cfg.Set("assignedDeleteTimeout", assignedDeleteTimeout)
cfg.Set("backoff.initialInterval", InitialInterval)
cfg.Set("backoff.randFactor", RandFactor)
cfg.Set("backoff.multiplier", Multiplier)

View File

@ -15,13 +15,12 @@
package telemetry
import (
"html/template"
"net/http"
"sort"
"bufio"
"bytes"
"fmt"
"html/template"
"net/http"
"sort"
"strings"
"github.com/spf13/viper"
@ -87,9 +86,11 @@ func (cz *configz) ServeHTTP(w http.ResponseWriter, req *http.Request) {
fmt.Print(s)
}
func bindConfigz(mux *http.ServeMux, cfg config.View) {
func bindConfigz(p Params, b Bindings) error {
cfg := p.Config()
if !cfg.GetBool(configNameTelemetryZpagesEnabled) {
return
return nil
}
mux.Handle(configEndpoint, &configz{cfg: cfg})
b.TelemetryHandle(configEndpoint, &configz{cfg: cfg})
return nil
}

View File

@ -17,8 +17,6 @@ package telemetry
import (
"fmt"
"net/http"
"open-match.dev/open-match/internal/config"
)
const (
@ -51,11 +49,13 @@ func newHelp() func(w http.ResponseWriter, req *http.Request) {
}
}
func bindHelp(mux *http.ServeMux, cfg config.View) {
if !cfg.GetBool(configNameTelemetryZpagesEnabled) {
return
func bindHelp(p Params, b Bindings) error {
if !p.Config().GetBool(configNameTelemetryZpagesEnabled) {
return nil
}
h := newHelp()
mux.HandleFunc(helpEndpoint, h)
mux.HandleFunc(helpSecondaryEndpoint, h)
b.TelemetryHandleFunc(helpEndpoint, h)
b.TelemetryHandleFunc(helpSecondaryEndpoint, h)
return nil
}

View File

@ -16,40 +16,42 @@ package telemetry
import (
"contrib.go.opencensus.io/exporter/jaeger"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"open-match.dev/open-match/internal/config"
)
func bindJaeger(servicePrefix string, cfg config.View) {
func bindJaeger(p Params, b Bindings) error {
cfg := p.Config()
if !cfg.GetBool("telemetry.jaeger.enable") {
logger.Info("Jaeger Tracing: Disabled")
return
return nil
}
agentEndpointURI := cfg.GetString("telemetry.jaeger.agentEndpoint")
collectorEndpointURI := cfg.GetString("telemetry.jaeger.collectorEndpoint")
je, err := jaeger.NewExporter(jaeger.Options{
AgentEndpoint: agentEndpointURI,
CollectorEndpoint: collectorEndpointURI,
ServiceName: servicePrefix,
})
if err != nil {
logger.WithFields(logrus.Fields{
"error": err,
"agentEndpoint": agentEndpointURI,
"collectorEndpoint": collectorEndpointURI,
}).Fatalf(
"Failed to create the Jaeger exporter: %v", err)
}
// And now finally register it as a Trace Exporter
trace.RegisterExporter(je)
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(cfg.GetFloat64("telemetry.jaeger.samplerFraction"))})
serviceName := p.ServiceName()
logger.WithFields(logrus.Fields{
"agentEndpoint": agentEndpointURI,
"collectorEndpoint": collectorEndpointURI,
"serviceName": serviceName,
}).Info("Jaeger Tracing: ENABLED")
je, err := jaeger.NewExporter(jaeger.Options{
AgentEndpoint: agentEndpointURI,
CollectorEndpoint: collectorEndpointURI,
ServiceName: serviceName,
})
if err != nil {
return errors.Wrap(err, "Failed to create the Jaeger exporter")
}
trace.RegisterExporter(je)
b.AddCloser(func() {
trace.UnregisterExporter(je)
})
return nil
}

View File

@ -22,9 +22,11 @@ import (
"go.opencensus.io/tag"
)
// Default histogram distributions
var (
// HistogramBounds defines a unified bucket boundaries for all histogram typed time metrics in Open Match
HistogramBounds = []float64{0, 50, 100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200}
DefaultBytesDistribution = view.Distribution(64, 128, 256, 512, 1024, 2048, 4096, 16384, 65536, 262144, 1048576)
DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
DefaultCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
)
// Gauge creates a gauge metric to be recorded with dimensionless unit.

View File

@ -1,45 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"testing"
"github.com/stretchr/testify/assert"
"go.opencensus.io/stats"
utilTesting "open-match.dev/open-match/internal/util/testing"
)
func TestRecordUnitMeasurement(t *testing.T) {
ctx := utilTesting.NewContext(t)
c := Counter("telemetry/fake_metric", "fake")
RecordUnitMeasurement(ctx, c)
RecordUnitMeasurement(ctx, c)
}
func TestDoubleMetric(t *testing.T) {
assert := assert.New(t)
c := Counter("telemetry/fake_metric", "fake")
c2 := Counter("telemetry/fake_metric", "fake")
assert.Equal(c, c2)
}
func TestDoubleRegisterView(t *testing.T) {
assert := assert.New(t)
mFakeCounter := stats.Int64("telemetry/fake_metric", "Fake", "1")
v := counterView(mFakeCounter)
v2 := counterView(mFakeCounter)
assert.Equal(v, v2)
}

View File

@ -16,31 +16,39 @@ package telemetry
import (
"contrib.go.opencensus.io/exporter/ocagent"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"open-match.dev/open-match/internal/config"
)
func bindOpenCensusAgent(cfg config.View) func() error {
func bindOpenCensusAgent(p Params, b Bindings) error {
cfg := p.Config()
if !cfg.GetBool("telemetry.opencensusAgent.enable") {
logger.Info("OpenCensus Agent: Disabled")
return func() error { return nil }
return nil
}
agentEndpoint := cfg.GetString("telemetry.opencensusAgent.agentEndpoint")
logger.WithFields(logrus.Fields{
"agentEndpoint": agentEndpoint,
}).Info("OpenCensus Agent: ENABLED")
oce, err := ocagent.NewExporter(ocagent.WithAddress(agentEndpoint), ocagent.WithInsecure(), ocagent.WithServiceName("open-match"))
if err != nil {
logger.WithError(err).Fatalf("Failed to create a new ocagent exporter")
return errors.Wrap(err, "Failed to create a new ocagent exporter")
}
trace.RegisterExporter(oce)
view.RegisterExporter(oce)
logger.WithFields(logrus.Fields{
"agentEndpoint": agentEndpoint,
}).Info("OpenCensus Agent: ENABLED")
b.AddCloserErr(func() error {
view.UnregisterExporter(oce)
trace.UnregisterExporter(oce)
// Before the program stops, please remember to stop the exporter.
return oce.Stop()
})
// Before the program stops, please remember to stop the exporter.
return oce.Stop
return nil
}

View File

@ -19,8 +19,6 @@ import (
"fmt"
"net/http"
"sync/atomic"
"go.opencensus.io/tag"
)
const (
@ -32,11 +30,6 @@ const (
healthStateUnhealthy = int32(2)
)
var (
successKey = tag.MustNewKey("success")
mReadinessProbes = Counter("health/readiness", "readiness probes", successKey)
)
type statefulProbe struct {
healthState *int32
probes []func(context.Context) error
@ -58,11 +51,10 @@ func (sp *statefulProbe) ServeHTTP(w http.ResponseWriter, req *http.Request) {
logger.WithError(err).Warningf("%s health check failed. The server will terminate if this continues to happen.", HealthCheckEndpoint)
}
http.Error(w, err.Error(), http.StatusServiceUnavailable)
RecordUnitMeasurement(req.Context(), mReadinessProbes, tag.Insert(successKey, "false"))
return
}
}
RecordUnitMeasurement(req.Context(), mReadinessProbes, tag.Insert(successKey, "true"))
old := atomic.SwapInt32(sp.healthState, healthStateHealthy)
if old == healthStateUnhealthy {
logger.Infof("%s is healthy again.", HealthCheckEndpoint)

View File

@ -16,13 +16,11 @@ package telemetry
// Taken from https://opencensus.io/quickstart/go/metrics/#1
import (
"net/http"
ocPrometheus "contrib.go.opencensus.io/exporter/prometheus"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats/view"
"open-match.dev/open-match/internal/config"
)
const (
@ -30,36 +28,46 @@ const (
ConfigNameEnableMetrics = "telemetry.prometheus.enable"
)
func bindPrometheus(mux *http.ServeMux, cfg config.View) {
func bindPrometheus(p Params, b Bindings) error {
cfg := p.Config()
if !cfg.GetBool("telemetry.prometheus.enable") {
logger.Info("Prometheus Metrics: Disabled")
return
return nil
}
endpoint := cfg.GetString("telemetry.prometheus.endpoint")
logger.WithFields(logrus.Fields{
"endpoint": endpoint,
}).Info("Prometheus Metrics: ENABLED")
registry := prometheus.NewRegistry()
// Register standard prometheus instrumentation.
registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
registry.MustRegister(prometheus.NewGoCollector())
err := registry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
if err != nil {
return errors.Wrap(err, "Failed to register prometheus collector")
}
err = registry.Register(prometheus.NewGoCollector())
if err != nil {
return errors.Wrap(err, "Failed to register prometheus collector")
}
promExporter, err := ocPrometheus.NewExporter(
ocPrometheus.Options{
Namespace: "",
Registry: registry,
})
if err != nil {
logger.WithFields(logrus.Fields{
"error": err,
"endpoint": endpoint,
}).Fatal(
"Failed to initialize OpenCensus exporter to Prometheus")
return errors.Wrap(err, "Failed to initialize OpenCensus exporter to Prometheus")
}
// Register the Prometheus exporters as a stats exporter.
view.RegisterExporter(promExporter)
b.AddCloser(func() {
view.UnregisterExporter(promExporter)
})
mux.Handle(endpoint, promExporter)
logger.WithFields(logrus.Fields{
"endpoint": endpoint,
}).Info("Prometheus Metrics: ENABLED")
b.TelemetryHandle(endpoint, promExporter)
return nil
}

View File

@ -18,10 +18,11 @@ import (
"net/http"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/util"
)
var (
@ -32,31 +33,61 @@ var (
)
// Setup configures the telemetry for the server.
func Setup(servicePrefix string, mux *http.ServeMux, cfg config.View) func() {
mc := util.NewMultiClose()
periodString := cfg.GetString("telemetry.reportingPeriod")
reportingPeriod, err := time.ParseDuration(periodString)
if err != nil {
logger.WithFields(logrus.Fields{
"error": err,
"reportingPeriod": periodString,
}).Info("Failed to parse telemetry.reportingPeriod, defaulting to 1m")
reportingPeriod = time.Minute * 1
func Setup(p Params, b Bindings) error {
bindings := []func(p Params, b Bindings) error{
configureOpenCensus,
bindJaeger,
bindPrometheus,
bindStackDriverMetrics,
bindOpenCensusAgent,
bindZpages,
bindHelp,
bindConfigz,
}
bindJaeger(servicePrefix, cfg)
bindPrometheus(mux, cfg)
mc.AddCloseFunc(bindStackDriverMetrics(cfg))
mc.AddCloseWithErrorFunc(bindOpenCensusAgent(cfg))
bindZpages(mux, cfg)
bindHelp(mux, cfg)
bindConfigz(mux, cfg)
for _, f := range bindings {
err := f(p, b)
if err != nil {
return err
}
}
// Change the frequency of updates to the metrics endpoint
view.SetReportingPeriod(reportingPeriod)
return nil
}
func configureOpenCensus(p Params, b Bindings) error {
// There's no way to undo these options, but the next startup will override
// them.
samplingFraction := p.Config().GetFloat64("telemetry.traceSamplingFraction")
logger.WithFields(logrus.Fields{
"samplingFraction": samplingFraction,
}).Info("Tracing sampler fraction set")
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(samplingFraction)})
periodString := p.Config().GetString("telemetry.reportingPeriod")
reportingPeriod, err := time.ParseDuration(periodString)
if err != nil {
return errors.Wrap(err, "Unable to parse telemetry.reportingPeriod")
}
logger.WithFields(logrus.Fields{
"reportingPeriod": reportingPeriod,
}).Info("telemetry has been configured.")
return mc.Close
}).Info("Telemetry reporting period set")
// Change the frequency of updates to the metrics endpoint
view.SetReportingPeriod(reportingPeriod)
return nil
}
// Params allows appmain to bind telemetry without a circular dependency.
type Params interface {
	// Config returns the application's configuration view.
	Config() config.View
	// ServiceName returns the name of the service being instrumented.
	ServiceName() string
}
// Bindings allows appmain to bind telemetry without a circular dependency.
type Bindings interface {
	// TelemetryHandle registers handler for the given pattern on the
	// telemetry HTTP server.
	TelemetryHandle(pattern string, handler http.Handler)
	// TelemetryHandleFunc registers a handler function for the given pattern
	// on the telemetry HTTP server.
	TelemetryHandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request))
	// AddCloser registers a function to be run at shutdown.
	AddCloser(c func())
	// AddCloserErr registers a shutdown function which may return an error.
	AddCloserErr(c func() error)
}

View File

@ -16,43 +16,45 @@ package telemetry
import (
"contrib.go.opencensus.io/exporter/stackdriver"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"open-match.dev/open-match/internal/config"
)
func bindStackDriverMetrics(cfg config.View) func() {
func bindStackDriverMetrics(p Params, b Bindings) error {
cfg := p.Config()
if !cfg.GetBool("telemetry.stackdriverMetrics.enable") {
logger.Info("StackDriver Metrics: Disabled")
return func() {}
return nil
}
gcpProjectID := cfg.GetString("telemetry.stackdriverMetrics.gcpProjectId")
metricPrefix := cfg.GetString("telemetry.stackdriverMetrics.prefix")
sd, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: gcpProjectID,
// MetricPrefix helps uniquely identify your metrics.
MetricPrefix: metricPrefix,
})
if err != nil {
logger.WithFields(logrus.Fields{
"error": err,
"gcpProjectID": gcpProjectID,
"metricPrefix": metricPrefix,
}).Fatal("Failed to initialize OpenCensus exporter to Stack Driver")
}
// Register it as a metrics exporter
view.RegisterExporter(sd)
// Register it as a trace exporter
trace.RegisterExporter(sd)
logger.WithFields(logrus.Fields{
"gcpProjectID": gcpProjectID,
"metricPrefix": metricPrefix,
}).Info("StackDriver Metrics: ENABLED")
// It is imperative to invoke flush before your main function exits
return sd.Flush
sd, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: gcpProjectID,
// MetricPrefix helps uniquely identify your metrics.
MetricPrefix: metricPrefix,
})
if err != nil {
return errors.Wrap(err, "Failed to initialize OpenCensus exporter to Stack Driver")
}
view.RegisterExporter(sd)
trace.RegisterExporter(sd)
b.AddCloser(func() {
view.UnregisterExporter(sd)
trace.UnregisterExporter(sd)
// It is imperative to invoke flush before your main function exits
sd.Flush()
})
return nil
}

View File

@ -20,7 +20,6 @@ import (
"github.com/sirupsen/logrus"
"go.opencensus.io/zpages"
"open-match.dev/open-match/internal/config"
)
const (
@ -28,11 +27,17 @@ const (
configNameTelemetryZpagesEnabled = "telemetry.zpages.enable"
)
func bindZpages(mux *http.ServeMux, cfg config.View) {
if !cfg.GetBool(configNameTelemetryZpagesEnabled) {
func bindZpages(p Params, b Bindings) error {
if !p.Config().GetBool(configNameTelemetryZpagesEnabled) {
logger.Info("zPages: Disabled")
return
return nil
}
logger.WithFields(logrus.Fields{
"endpoint": debugEndpoint,
}).Info("zPages: ENABLED")
mux := http.NewServeMux()
zpages.Handle(mux, debugEndpoint)
mux.HandleFunc(debugEndpoint+"/pprof/", pprof.Index)
@ -41,7 +46,7 @@ func bindZpages(mux *http.ServeMux, cfg config.View) {
mux.HandleFunc(debugEndpoint+"/pprof/symbol", pprof.Symbol)
mux.HandleFunc(debugEndpoint+"/pprof/trace", pprof.Trace)
logger.WithFields(logrus.Fields{
"endpoint": debugEndpoint,
}).Info("zPages: ENABLED")
b.TelemetryHandle(debugEndpoint, mux)
return nil
}

View File

@ -18,177 +18,52 @@ package e2e
import (
"context"
"fmt"
"log"
"os"
"sync"
"testing"
"time"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/resolver"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"open-match.dev/open-match/internal/logging"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/util"
pb "open-match.dev/open-match/pkg/pb"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/statestore"
mmfService "open-match.dev/open-match/internal/testing/mmf"
)
func init() {
// Reset the gRPC resolver to passthrough for end-to-end out-of-cluster testings.
// DNS resolver is unsupported for end-to-end local testings.
resolver.SetDefaultScheme("dns")
}
type clusterOM struct {
kubeClient kubernetes.Interface
namespace string
t *testing.T
mc *util.MultiClose
}
func (com *clusterOM) withT(t *testing.T) OM {
return &clusterOM{
kubeClient: com.kubeClient,
namespace: com.namespace,
t: t,
mc: util.NewMultiClose(),
}
}
func (com *clusterOM) MustFrontendGRPC() pb.FrontendServiceClient {
conn, err := com.getGRPCClientFromServiceName("om-frontend")
if err != nil {
com.t.Fatalf("cannot create gRPC client, %s", err)
}
com.mc.AddCloseWithErrorFunc(conn.Close)
return pb.NewFrontendServiceClient(conn)
}
func (com *clusterOM) MustBackendGRPC() pb.BackendServiceClient {
conn, err := com.getGRPCClientFromServiceName("om-backend")
if err != nil {
com.t.Fatalf("cannot create gRPC client, %s", err)
}
com.mc.AddCloseWithErrorFunc(conn.Close)
return pb.NewBackendServiceClient(conn)
}
func (com *clusterOM) MustQueryServiceGRPC() pb.QueryServiceClient {
conn, err := com.getGRPCClientFromServiceName("om-query")
if err != nil {
com.t.Fatalf("cannot create gRPC client, %s", err)
}
com.mc.AddCloseWithErrorFunc(conn.Close)
return pb.NewQueryServiceClient(conn)
}
func (com *clusterOM) MustMmfConfigGRPC() *pb.FunctionConfig {
host, port := com.getGRPCAddressFromServiceName("om-function")
return &pb.FunctionConfig{
Host: host,
Port: port,
Type: pb.FunctionConfig_GRPC,
}
}
func (com *clusterOM) MustMmfConfigHTTP() *pb.FunctionConfig {
host, port := com.getHTTPAddressFromServiceName("om-function")
return &pb.FunctionConfig{
Host: host,
Port: port,
Type: pb.FunctionConfig_REST,
}
}
func (com *clusterOM) getAddressFromServiceName(serviceName, portName string) (string, int32) {
endpoints, err := com.kubeClient.CoreV1().Endpoints(com.namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
com.t.Fatalf("cannot get service definition for %s, %s", serviceName, err.Error())
}
if len(endpoints.Subsets) == 0 || len(endpoints.Subsets[0].Addresses) == 0 {
com.t.Fatalf("service %s does not have an available endpoint", serviceName)
}
var port int32
for _, endpointsPort := range endpoints.Subsets[0].Ports {
if endpointsPort.Name == portName {
port = endpointsPort.Port
}
}
return endpoints.Subsets[0].Addresses[0].IP, port
}
func (com *clusterOM) getGRPCAddressFromServiceName(serviceName string) (string, int32) {
return com.getAddressFromServiceName(serviceName, "grpc")
}
func (com *clusterOM) getHTTPAddressFromServiceName(serviceName string) (string, int32) {
return com.getAddressFromServiceName(serviceName, "http")
}
func (com *clusterOM) getGRPCClientFromServiceName(serviceName string) (*grpc.ClientConn, error) {
ipAddress, port := com.getGRPCAddressFromServiceName(serviceName)
conn, err := rpc.GRPCClientFromParams(&rpc.ClientParams{
Address: fmt.Sprintf("%s:%d", ipAddress, int(port)),
EnableRPCLogging: *testOnlyEnableRPCLoggingFlag,
EnableRPCPayloadLogging: logging.IsDebugLevel(*testOnlyLoggingLevel),
EnableMetrics: *testOnlyEnableMetrics,
func start(t *testing.T, eval evaluator.Evaluator, mmf mmfService.MatchFunction) (config.View, func(time.Duration)) {
clusterLock.Lock()
t.Cleanup(func() {
clusterLock.Unlock()
})
if err != nil {
return nil, errors.Wrapf(err, "cannot connect to gRPC %s:%d", ipAddress, port)
if !clusterStarted {
t.Fatal("Cluster not started")
}
return conn, nil
}
clusterEval = eval
clusterMMF = mmf
func (com *clusterOM) HealthCheck() error {
podList, err := com.kubeClient.CoreV1().Pods(com.namespace).List(metav1.ListOptions{})
cfg, err := config.Read()
if err != nil {
return errors.Wrap(err, "cannot get pods list")
t.Fatal(err)
}
for _, pod := range podList.Items {
if app, ok := pod.ObjectMeta.Labels["app"]; ok && app == "open-match" && pod.Status.Phase != corev1.PodRunning {
return errors.Errorf("pod %+v is not running.", pod)
t.Cleanup(func() {
pool := statestore.GetRedisPool(cfg)
conn, err := pool.GetContext(context.Background())
if err != nil {
t.Fatal(err)
}
}
return nil
_, err = conn.Do("FLUSHALL")
if err != nil {
t.Fatal(err)
}
err = pool.Close()
if err != nil {
t.Fatal(err)
}
})
return cfg, time.Sleep
}
func (com *clusterOM) Context() context.Context {
return context.Background()
}
func (com *clusterOM) cleanup() {
com.mc.Close()
}
func (com *clusterOM) cleanupMain() error {
return nil
}
func fileExists(name string) bool {
_, err := os.Stat(name)
return err == nil
}
func createZygote(m *testing.M) (OM, error) {
// creates the in-cluster config
kubeconfig, err := rest.InClusterConfig()
if err != nil {
log.Fatal(err.Error())
}
kubeClient, err := kubernetes.NewForConfig(kubeconfig)
if err != nil {
return nil, errors.Wrapf(err, "creating Kubernetes client from config failed\nconfig= %+v", kubeconfig)
}
return &clusterOM{
kubeClient: kubeClient,
namespace: os.Getenv("NAMESPACE"),
}, nil
}
// clusterLock serializes cluster-based tests; start() takes it and releases
// it in the test's cleanup.
var clusterLock sync.Mutex

// clusterEval and clusterMMF are the per-test hooks invoked by the
// long-running in-cluster evaluator and mmf servers.
var clusterEval evaluator.Evaluator
var clusterMMF mmfService.MatchFunction

// clusterStarted is set by TestMain once the in-cluster servers are running.
var clusterStarted bool

View File

@ -0,0 +1,106 @@
// +build e2ecluster
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"fmt"
"os"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/appmain/apptest"
"open-match.dev/open-match/internal/config"
mmfService "open-match.dev/open-match/internal/testing/mmf"
"open-match.dev/open-match/pkg/pb"
"strings"
)
// TestServiceHealth verifies that every open-match labeled pod in the test
// namespace is in the Running phase.
func TestServiceHealth(t *testing.T) {
	// In-cluster config: this test runs inside the Kubernetes cluster.
	kubeconfig, err := rest.InClusterConfig()
	if err != nil {
		t.Fatal(err)
	}
	kubeClient, err := kubernetes.NewForConfig(kubeconfig)
	if err != nil {
		t.Fatalf("%s: creating Kubernetes client from config failed\nconfig= %+v", err, kubeconfig)
	}

	namespace := os.Getenv("NAMESPACE")

	podList, err := kubeClient.CoreV1().Pods(namespace).List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}

	for _, pod := range podList.Items {
		if app, ok := pod.ObjectMeta.Labels["app"]; ok && app == "open-match" && pod.Status.Phase != corev1.PodRunning {
			t.Errorf("pod %+v is not running.", pod)
		}
	}
}
// TestMain starts the shared mmf and evaluator servers, runs the tests, and
// shuts the servers down, propagating the test exit code.
func TestMain(m *testing.M) {
	clusterStarted = true
	// Indirect through the cluster-level hooks so each test can install its
	// own mmf/evaluator while the servers keep running.
	mmf := func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		return clusterMMF(ctx, profile, out)
	}
	eval := func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		return clusterEval(ctx, in, out)
	}

	cleanup, err := apptest.RunInCluster(mmfService.BindServiceFor(mmf), evaluator.BindServiceFor(eval))
	if err != nil {
		fmt.Println("Error starting mmf and evaluator:", err)
		os.Exit(1)
	}

	exitCode := m.Run()

	err = cleanup()
	if err != nil {
		fmt.Println("Error stopping mmf and evaluator:", err)
		os.Exit(1)
	}

	os.Exit(exitCode)
}
// TestConfigMatch covers that the config file used for local in memory e2e
// tests matches the configs used for the in cluster tests, to avoid drift.
func TestConfigMatch(t *testing.T) {
	// Config as deployed in the cluster.
	cfg, err := config.Read()
	if err != nil {
		t.Fatal(err)
	}

	// Config as embedded in the local test source (configFile).
	cfgMemory := viper.New()
	cfgMemory.SetConfigType("yaml")
	err = cfgMemory.ReadConfig(strings.NewReader(configFile))
	if err != nil {
		t.Fatal(err)
	}

	require.Equal(t, cfgMemory.AllSettings(), cfg.AllSettings())
}

View File

@ -17,12 +17,16 @@ package e2e
import (
"context"
"flag"
"log"
"os"
"sync"
"testing"
"time"
"google.golang.org/grpc/resolver"
pb "open-match.dev/open-match/pkg/pb"
"github.com/pkg/errors"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/appmain/apptest"
"open-match.dev/open-match/internal/config"
mmfService "open-match.dev/open-match/internal/testing/mmf"
"open-match.dev/open-match/pkg/pb"
)
var (
@ -31,52 +35,216 @@ var (
testOnlyLoggingLevel = flag.String("test_only_log_level", "info", "Sets the log level for tests.")
)
// OM is the interface for communicating with Open Match.
type OM interface {
// MustFrontendGRPC returns a gRPC client to frontend server.
MustFrontendGRPC() pb.FrontendServiceClient
// MustBackendGRPC returns a gRPC client to backend server.
MustBackendGRPC() pb.BackendServiceClient
// MustQueryServiceGRPC returns a gRPC client to query server.
MustQueryServiceGRPC() pb.QueryServiceClient
// HealthCheck probes the cluster for readiness.
HealthCheck() error
// MustMmfConfigGRPC returns a grpc match function config for backend server.
MustMmfConfigGRPC() *pb.FunctionConfig
// MustMmfConfigHTTP returns a http match function config for backend server.
MustMmfConfigHTTP() *pb.FunctionConfig
// Context provides a context to call remote methods.
Context() context.Context
func newOM(t *testing.T) *om {
om := &om{
t: t,
}
t.Cleanup(func() {
om.fLock.Lock()
defer om.fLock.Unlock()
om.running.Wait()
// Set this cleanup before starting servers, so that servers will be
// stopped before this runs.
if om.mmf != nil && !om.mmfCalled {
t.Error("MMF set but never called.")
}
if om.eval != nil && !om.evalCalled {
t.Error("Evaluator set but never called.")
}
})
cleanup()
cleanupMain() error
withT(t *testing.T) OM
}
om.cfg, om.AdvanceTTLTime = start(t, om.evaluate, om.runMMF)
om.fe = pb.NewFrontendServiceClient(apptest.GRPCClient(t, om.cfg, "api.frontend"))
om.be = pb.NewBackendServiceClient(apptest.GRPCClient(t, om.cfg, "api.backend"))
om.query = pb.NewQueryServiceClient(apptest.GRPCClient(t, om.cfg, "api.query"))
// New creates a new e2e test interface.
func New(t *testing.T) OM {
om := zygote.withT(t)
t.Cleanup(om.cleanup)
return om
}
// RunMain provides the setup and teardown for Open Match e2e tests.
func RunMain(m *testing.M) {
// Reset the gRPC resolver to passthrough for end-to-end out-of-cluster testings.
// DNS resolver is unsupported for end-to-end local testings.
resolver.SetDefaultScheme("passthrough")
var exitCode int
z, err := createZygote(m)
if err != nil {
log.Fatalf("failed to setup framework: %s", err)
}
defer func() {
cErr := z.cleanupMain()
if cErr != nil {
log.Printf("failed to cleanup resources: %s", cErr)
}
os.Exit(exitCode)
}()
zygote = z
exitCode = m.Run()
// om is the per-test harness: service clients plus the mmf/evaluator hooks
// installed by the test.
type om struct {
	t   *testing.T
	cfg config.View

	fe    pb.FrontendServiceClient
	be    pb.BackendServiceClient
	query pb.QueryServiceClient
	// For local tests, advances the mini-redis ttl time. For in cluster tests,
	// just sleeps.
	AdvanceTTLTime func(time.Duration)

	// running tracks in-flight runMMF/evaluate calls so cleanup can wait
	// for them to drain.
	running sync.WaitGroup
	// fLock guards the five fields below.
	fLock      sync.Mutex
	mmfCalled  bool
	evalCalled bool
	mmf        mmfService.MatchFunction
	eval       evaluator.Evaluator
}
// SetMMF installs the match function this test's backend calls will invoke.
// It may be set at most once; a repeat call fails the test.
func (om *om) SetMMF(mmf mmfService.MatchFunction) {
	om.fLock.Lock()
	defer om.fLock.Unlock()

	if om.mmf != nil {
		om.t.Fatal("Matchmaking function set multiple times")
		return
	}
	om.mmf = mmf
}
// runMMF is the mmf hook handed to start. It snapshots the function set via
// SetMMF under fLock, marks it called, and delegates to it; running is
// incremented so the harness cleanup can wait for in-flight calls.
func (om *om) runMMF(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
	om.fLock.Lock()
	om.running.Add(1)
	defer om.running.Done()
	mmf := om.mmf
	om.mmfCalled = true
	om.fLock.Unlock()

	if mmf == nil {
		return errors.New("MMF called without being set")
	}
	return mmf(ctx, profile, out)
}
// SetEvaluator installs the evaluator this test's backend calls will invoke.
// It may be set at most once; a repeat call fails the test.
func (om *om) SetEvaluator(eval evaluator.Evaluator) {
	om.fLock.Lock()
	defer om.fLock.Unlock()

	if om.eval != nil {
		om.t.Fatal("Evaluator function set multiple times")
		return
	}
	om.eval = eval
}
// evaluate is the evaluator hook handed to start. It snapshots the function
// set via SetEvaluator under fLock, marks it called, and delegates to it;
// running is incremented so the harness cleanup can wait for in-flight calls.
func (om *om) evaluate(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
	om.fLock.Lock()
	om.running.Add(1)
	defer om.running.Done()
	eval := om.eval
	om.evalCalled = true
	om.fLock.Unlock()

	if eval == nil {
		return errors.New("Evaluator called without being set")
	}
	return eval(ctx, in, out)
}
// Frontend returns the frontend service client for this test.
func (om *om) Frontend() pb.FrontendServiceClient {
	return om.fe
}
// Backend returns the backend service client for this test.
func (om *om) Backend() pb.BackendServiceClient {
	return om.be
}
// Query returns the query service client for this test.
func (om *om) Query() pb.QueryServiceClient {
	return om.query
}
// MMFConfigGRPC returns the gRPC match function config pointing at the
// test-hosted mmf service.
func (om *om) MMFConfigGRPC() *pb.FunctionConfig {
	host := om.cfg.GetString("api." + apptest.ServiceName + ".hostname")
	port := om.cfg.GetInt("api." + apptest.ServiceName + ".grpcport")
	return &pb.FunctionConfig{
		Host: host,
		Port: int32(port),
		Type: pb.FunctionConfig_GRPC,
	}
}
// MMFConfigHTTP returns the HTTP (REST) match function config pointing at
// the test-hosted mmf service.
func (om *om) MMFConfigHTTP() *pb.FunctionConfig {
	return &pb.FunctionConfig{
		Host: om.cfg.GetString("api." + apptest.ServiceName + ".hostname"),
		Port: int32(om.cfg.GetInt("api." + apptest.ServiceName + ".httpport")),
		Type: pb.FunctionConfig_REST,
	}
}
// Testing constants which must match the configuration. Not parsed in test so
// that parsing bugs can't hide logic bugs. Each value mirrors the
// corresponding key in the configFile constant below.
const registrationInterval = time.Millisecond * 200
const proposalCollectionInterval = time.Millisecond * 200
const pendingReleaseTimeout = time.Millisecond * 200
const assignedDeleteTimeout = time.Millisecond * 200
// configFile is the "canonical" test config. It exactly matches the configmap
// which is used in the real cluster tests.
const configFile = `
registrationInterval: 200ms
proposalCollectionInterval: 200ms
pendingReleaseTimeout: 200ms
assignedDeleteTimeout: 200ms
queryPageSize: 10
logging:
level: debug
format: text
rpc: false
backoff:
initialInterval: 100ms
maxInterval: 500ms
multiplier: 1.5
randFactor: 0.5
maxElapsedTime: 3000ms
api:
backend:
hostname: "om-backend"
grpcport: "50505"
httpport: "51505"
frontend:
hostname: "om-frontend"
grpcport: "50504"
httpport: "51504"
query:
hostname: "om-query"
grpcport: "50503"
httpport: "51503"
synchronizer:
hostname: "om-synchronizer"
grpcport: "50506"
httpport: "51506"
swaggerui:
hostname: "om-swaggerui"
httpport: "51500"
scale:
httpport: "51509"
evaluator:
hostname: "test"
grpcport: "50509"
httpport: "51509"
test:
hostname: "test"
grpcport: "50509"
httpport: "51509"
redis:
sentinelPort: 26379
sentinelMaster: om-redis-master
sentinelHostname: om-redis
sentinelUsePassword:
usePassword: false
passwordPath: /opt/bitnami/redis/secrets/redis-password
pool:
maxIdle: 200
maxActive: 0
idleTimeout: 0
healthCheckTimeout: 300ms
telemetry:
reportingPeriod: "1m"
traceSamplingFraction: "0.01"
zpages:
enable: "true"
jaeger:
enable: "false"
agentEndpoint: "open-match-jaeger-agent:6831"
collectorEndpoint: "http://open-match-jaeger-collector:14268/api/traces"
prometheus:
enable: "false"
endpoint: "/metrics"
serviceDiscovery: "true"
stackdriverMetrics:
enable: "false"
gcpProjectId: "intentionally-invalid-value"
prefix: "open_match"
`

View File

@ -1,40 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
var (
zygote OM
)
const (
// Map1BeginnerPool is a pool name.
Map1BeginnerPool = "map1beginner"
// Map1AdvancedPool is pool name.
Map1AdvancedPool = "map1advanced"
// Map2BeginnerPool is pool name.
Map2BeginnerPool = "map2beginner"
// Map2AdvancedPool is pool name.
Map2AdvancedPool = "map2advanced"
// DoubleArgMMR is an index used to test DoubleRangeFilter.
DoubleArgMMR = "attribute.mmr"
// DoubleArgLevel is an index used to test DoubleRangeFilter.
DoubleArgLevel = "attribute.level"
// DoubleArgDefense is an index used to test DoubleRangeFilter.
DoubleArgDefense = "attribute.defense"
// ModeDemo is an index used to test BoolEqualsFilter
ModeDemo = "mode.demo"
// Role is an index used to test StringEqualsFilter
Role = "char"
)

View File

@ -0,0 +1,843 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"io"
"sync"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
// TestHappyPath does a simple test of successfully creating a match with two tickets.
func TestHappyPath(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	m := &pb.Match{
		MatchId: "1",
		Tickets: []*pb.Ticket{t1, t2},
	}

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		out <- m
		return nil
	})

	// Evaluator asserts it sees exactly the one proposed match, then
	// approves it by id.
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		p, ok := <-in
		require.True(t, ok)
		require.True(t, proto.Equal(p, m))
		_, ok = <-in
		require.False(t, ok)
		out <- m.MatchId
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	resp, err := stream.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m, resp.Match))

	resp, err = stream.Recv()
	require.Equal(t, err, io.EOF)
	require.Nil(t, resp)
}
// TestMatchFunctionMatchCollision covers two matches with the same id coming
// from the same MMF generates an error to the fetch matches call. Also ensures
// another function running in the same cycle does not experience an error.
func TestMatchFunctionMatchCollision(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	// Both mmf runs wait for the other to start before sending results, to
	// ensure they run in the same cycle.
	errorMMFStarted := make(chan struct{})
	successMMFStarted := make(chan struct{})

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		switch profile.Name {
		case "error":
			close(errorMMFStarted)
			<-successMMFStarted
			// Same match id sent twice by one MMF: this profile's fetch
			// must fail.
			out <- &pb.Match{
				MatchId: "1",
				Tickets: []*pb.Ticket{t1},
			}
			out <- &pb.Match{
				MatchId: "1",
				Tickets: []*pb.Ticket{t2},
			}
		case "success":
			close(successMMFStarted)
			<-errorMMFStarted
			out <- &pb.Match{
				MatchId: "3",
				Tickets: []*pb.Ticket{t2},
			}
		default:
			panic("Unknown profile!")
		}
		return nil
	})

	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		for m := range in {
			if m.MatchId == "3" {
				out <- "3"
			}
		}
		return nil
	})

	startTime := time.Now()

	sError, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config: om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{
			Name: "error",
		},
	})
	require.Nil(t, err)

	sSuccess, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config: om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{
			Name: "success",
		},
	})
	require.Nil(t, err)

	resp, err := sError.Recv()
	require.Contains(t, err.Error(), "MatchMakingFunction returned same match_id twice: \"1\"")
	require.Nil(t, resp)

	resp, err = sSuccess.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(t2, resp.Match.Tickets[0]))
	// The healthy profile must not be delayed by the failing one.
	require.True(t, time.Since(startTime) < registrationInterval, "%s", time.Since(startTime))

	resp, err = sSuccess.Recv()
	require.Equal(t, err, io.EOF)
	require.Nil(t, resp)
}
// TestSynchronizerMatchCollision covers two different MMFs generating matches
// with the same id, and causing all fetch match calls to fail with an error
// indicating this occurred.
func TestSynchronizerMatchCollision(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	// Both fetches use the same MMF, which always proposes match id "1".
	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		out <- &pb.Match{
			MatchId: "1",
			Tickets: []*pb.Ticket{t1},
		}
		return nil
	})

	timesEvaluatorCalled := 0
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		timesEvaluatorCalled++
		require.Equal(t, 1, timesEvaluatorCalled)
		for m := range in {
			out <- m.MatchId
		}
		return nil
	})

	s1, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	// Flow first match through before starting second mmf, so the second mmf
	// is the one which gets the collision error.
	_, err = s1.Recv()
	require.Nil(t, err)

	s2, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	resp, err := s2.Recv()
	require.Nil(t, resp)
	require.Contains(t, err.Error(), "multiple match functions used same match_id: \"1\"")

	resp, err = s1.Recv()
	require.Contains(t, err.Error(), "multiple match functions used same match_id: \"1\"")
	require.Nil(t, resp)
}
// TestEvaluatorReturnInvalidId covers the evaluator returning an ID which does
// not correspond to any match passed to it.
func TestEvaluatorReturnInvalidId(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		return nil
	})

	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		// No matches were proposed, so "1" can't be a valid match id.
		out <- "1"
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	resp, err := stream.Recv()
	require.Contains(t, err.Error(), "evaluator returned match_id \"1\" which does not correspond to its any match in its input")
	require.Nil(t, resp)
}
// TestEvaluatorReturnDuplicateMatchId covers the evaluator returning the same
// match id twice, which causes an error for fetch match callers.
func TestEvaluatorReturnDuplicateMatchId(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	m := &pb.Match{
		MatchId: "1",
		Tickets: []*pb.Ticket{t1, t2},
	}

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		out <- m
		return nil
	})

	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		p, ok := <-in
		require.True(t, ok)
		require.True(t, proto.Equal(p, m))
		_, ok = <-in
		require.False(t, ok)
		// Approving the same id twice is the error under test.
		out <- m.MatchId
		out <- m.MatchId
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	_, err = stream.Recv()
	// May receive up to one match
	if err == nil {
		_, err = stream.Recv()
	}
	require.Contains(t, err.Error(), "evaluator returned same match_id twice: \"1\"")
}
// TestMatchWithNoTickets covers that it is valid to create a match with no
// tickets specified. This is a questionable use case, but it works currently
// so it probably shouldn't be changed without significant justification.
func TestMatchWithNoTickets(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	m := &pb.Match{
		MatchId: "1",
		Tickets: nil,
	}

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		out <- m
		return nil
	})

	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		p, ok := <-in
		require.True(t, ok)
		require.True(t, proto.Equal(p, m))
		_, ok = <-in
		require.False(t, ok)
		out <- m.MatchId
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	resp, err := stream.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m, resp.Match))

	resp, err = stream.Recv()
	require.Equal(t, err, io.EOF)
	require.Nil(t, resp)
}
// TestEvaluatorError covers an evaluator returning an error message, and
// ensuring that the error message is returned to the fetch matches call.
func TestEvaluatorError(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	// The MMF proposes nothing; the evaluator fails outright.
	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		return errors.New("my custom error")
	})

	fetch, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	// The evaluator's error propagates to the backend caller.
	got, err := fetch.Recv()
	require.Contains(t, err.Error(), "my custom error")
	require.Nil(t, got)
}
// TestMMFError covers an MMF returning an error message, and ensuring
// that the error message is returned to the fetch matches call.
func TestMMFError(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	// The MMF fails immediately.
	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		return errors.New("my custom error")
	})
	// The evaluator still runs for the cycle; its input closes with no
	// proposals since the MMF produced none.
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		_, ok := <-in
		require.False(t, ok)
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	// The MMF's error propagates to the backend caller on Recv.
	resp, err := stream.Recv()
	require.Contains(t, err.Error(), "my custom error")
	require.Nil(t, resp)
}
// TestNoMatches covers that returning no matches is acceptable.
func TestNoMatches(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		// No proposals were made, so the input closes without any matches.
		_, ok := <-in
		require.False(t, ok)
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	// Stream ends immediately. Expected value first per testify convention.
	resp, err := stream.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)
}
// TestNoProfile covers missing the profile field on fetch matches.
func TestNoProfile(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	// The FetchMatches call itself succeeds; the validation error surfaces
	// on the first Recv.
	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: nil,
	})
	require.Nil(t, err)

	resp, err := stream.Recv()
	require.Equal(t, ".profile is required", status.Convert(err).Message())
	require.Equal(t, codes.InvalidArgument, status.Convert(err).Code())
	require.Nil(t, resp)
}
// TestNoConfig covers missing the config field on fetch matches.
func TestNoConfig(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	// Omit the function config entirely; the call succeeds, and the first
	// Recv surfaces the validation error.
	fetch, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  nil,
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	got, err := fetch.Recv()
	st := status.Convert(err)
	require.Equal(t, ".config is required", st.Message())
	require.Equal(t, codes.InvalidArgument, st.Code())
	require.Nil(t, got)
}
// TestCancel covers a fetch matches call canceling also causing mmf and
// evaluator to cancel.
func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	om := newOM(t)
	startTime := time.Now()

	// Track that both the MMF and the evaluator start, and that both observe
	// the cancellation before the test finishes.
	wgStarted := sync.WaitGroup{}
	wgStarted.Add(2)
	wgFinished := sync.WaitGroup{}
	wgFinished.Add(2)

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		wgStarted.Done()
		<-ctx.Done()
		// Expected value first per testify convention.
		require.Equal(t, context.Canceled, ctx.Err())
		wgFinished.Done()
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		wgStarted.Done()
		<-ctx.Done()
		require.Equal(t, context.Canceled, ctx.Err())
		wgFinished.Done()
		return nil
	})

	_, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	wgStarted.Wait()
	cancel()
	wgFinished.Wait()

	// The evaluator is only canceled after the registration window completes.
	require.True(t, time.Since(startTime) > registrationInterval, "%s", time.Since(startTime))
}
// TestStreaming covers that matches can stream through the mmf, evaluator, and
// return to the fetch matches call. At no point are all matches accumulated
// and then passed on. This keeps things efficiently moving.
func TestStreaming(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	// wg gates the second match: it is only sent after the first match has
	// been received by the backend caller, proving end-to-end streaming.
	wg := sync.WaitGroup{}
	wg.Add(1)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	m1 := &pb.Match{
		MatchId: "1",
		Tickets: []*pb.Ticket{t1},
	}
	m2 := &pb.Match{
		MatchId: "2",
		Tickets: []*pb.Ticket{t2},
	}

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		out <- m1
		wg.Wait()
		out <- m2
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		<-in
		out <- "1"
		wg.Wait()
		<-in
		out <- "2"
		_, ok := <-in
		require.False(t, ok)
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	resp, err := stream.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m1, resp.Match))

	// First match arrived before the second was even sent; unblock it.
	wg.Done()

	resp, err = stream.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m2, resp.Match))

	// Expected value first per testify convention.
	resp, err = stream.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)
}
// TestRegistrationWindow covers a synchronization cycle waiting for the
// registration window before closing the evaluator. However it also does not
// wait until the proposal window has closed if the mmfs have already returned.
func TestRegistrationWindow(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)
	startTime := time.Now()

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		_, ok := <-in
		require.False(t, ok)
		// The evaluator input only closes once the registration window has
		// elapsed, even though the MMF returned immediately.
		require.True(t, time.Since(startTime) > registrationInterval, "%s", time.Since(startTime))
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	// Expected value first per testify convention.
	resp, err := stream.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)
	require.True(t, time.Since(startTime) > registrationInterval, "%s", time.Since(startTime))
}
// TestProposalWindowClose covers that a long running match function will get
// canceled so that the cycle can complete.
func TestProposalWindowClose(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)
	startTime := time.Now()

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		// Never return on our own; wait for the synchronizer to cancel us
		// once the proposal window closes.
		<-ctx.Done()
		// Expected value first per testify convention.
		require.Equal(t, context.Canceled, ctx.Err())
		require.True(t, time.Since(startTime) > registrationInterval+proposalCollectionInterval, "%s", time.Since(startTime))
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		_, ok := <-in
		require.False(t, ok)
		require.True(t, time.Since(startTime) > registrationInterval+proposalCollectionInterval, "%s", time.Since(startTime))
		return nil
	})

	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	resp, err := stream.Recv()
	require.Contains(t, err.Error(), "match function ran longer than proposal window, canceling")
	require.Nil(t, resp)
	require.True(t, time.Since(startTime) > registrationInterval+proposalCollectionInterval, "%s", time.Since(startTime))
}
// TestMultipleFetchCalls covers multiple fetch matches calls running in the
// same cycle, using the same evaluator call, and having matches routed back to
// the correct caller.
func TestMultipleFetchCalls(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	m1 := &pb.Match{
		MatchId: "1",
		Tickets: []*pb.Ticket{t1},
	}
	m2 := &pb.Match{
		MatchId: "2",
		Tickets: []*pb.Ticket{t2},
	}

	// Each profile produces a distinct match so routing can be verified.
	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		switch profile.Name {
		case "one":
			out <- m1
		case "two":
			out <- m2
		default:
			return errors.New("Unknown profile")
		}
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		// A single evaluator call sees proposals from both fetch calls.
		ids := []string{}
		for m := range in {
			ids = append(ids, m.MatchId)
		}
		require.ElementsMatch(t, ids, []string{"1", "2"})
		for _, id := range ids {
			out <- id
		}
		return nil
	})

	s1, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config: om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{
			Name: "one",
		},
	})
	require.Nil(t, err)
	s2, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config: om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{
			Name: "two",
		},
	})
	require.Nil(t, err)

	// Each caller receives exactly its own match, then a clean EOF.
	// Expected value first per testify convention.
	resp, err := s1.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m1, resp.Match))
	resp, err = s1.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)

	resp, err = s2.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m2, resp.Match))
	resp, err = s2.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)
}
// TestSlowBackendDoesntBlock covers that after the evaluator has returned, a
// new cycle can start despite and slow fetch matches caller. Additionally, it
// confirms that the tickets are marked as pending, so the second cycle won't be
// using any of the tickets returned by the first cycle.
func TestSlowBackendDoesntBlock(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	m1 := &pb.Match{
		MatchId: "1",
		Tickets: []*pb.Ticket{t1},
	}
	m2 := &pb.Match{
		MatchId: "2",
		Tickets: []*pb.Ticket{t2},
	}

	// Signals each time the evaluator finishes a cycle.
	evaluatorDone := make(chan struct{})

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		pool, mmfErr := matchfunction.QueryPool(ctx, om.Query(), &pb.Pool{})
		require.Nil(t, mmfErr)

		// Use tk, not t, so the *testing.T t is not shadowed inside the loop.
		ids := []string{}
		for _, tk := range pool {
			ids = append(ids, tk.Id)
		}

		switch profile.Name {
		case "one":
			// First cycle: both tickets are still available.
			require.ElementsMatch(t, ids, []string{t1.Id, t2.Id})
			out <- m1
		case "two":
			// Second cycle: t1 is pending from the first cycle's match, so
			// only t2 is returned by the query.
			require.ElementsMatch(t, ids, []string{t2.Id})
			out <- m2
		default:
			return errors.New("Unknown profile")
		}
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		m := <-in
		_, ok := <-in
		require.False(t, ok)
		out <- m.MatchId
		evaluatorDone <- struct{}{}
		return nil
	})

	s1, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config: om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{
			Name: "one",
		},
	})
	require.Nil(t, err)
	<-evaluatorDone

	// Start a second cycle without reading anything from s1: the slow first
	// caller must not block the second cycle.
	s2, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config: om.MMFConfigGRPC(),
		Profile: &pb.MatchProfile{
			Name: "two",
		},
	})
	require.Nil(t, err)
	<-evaluatorDone

	// Drain the second caller first, then the first.
	// Expected value first per testify convention.
	resp, err := s2.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m2, resp.Match))
	resp, err = s2.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)

	resp, err = s1.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m1, resp.Match))
	resp, err = s1.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)
}
// TestHTTPMMF covers calling the MMF with http config instead of gRPC.
func TestHTTPMMF(t *testing.T) {
	ctx := context.Background()
	om := newOM(t)

	t1, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	t2, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	m := &pb.Match{
		MatchId: "1",
		Tickets: []*pb.Ticket{t1, t2},
	}

	om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
		out <- m
		return nil
	})
	om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
		p, ok := <-in
		require.True(t, ok)
		require.True(t, proto.Equal(p, m))
		_, ok = <-in
		require.False(t, ok)
		out <- m.MatchId
		return nil
	})

	// The only difference from the gRPC path is the HTTP function config.
	stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
		Config:  om.MMFConfigHTTP(),
		Profile: &pb.MatchProfile{},
	})
	require.Nil(t, err)

	resp, err := stream.Recv()
	require.Nil(t, err)
	require.True(t, proto.Equal(m, resp.Match))

	// Expected value first per testify convention.
	resp, err = stream.Recv()
	require.Equal(t, io.EOF, err)
	require.Nil(t, resp)
}

View File

@ -1,4 +1,5 @@
// +build !e2ecluster
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
@ -16,165 +17,76 @@
package e2e
import (
"context"
"net"
"strings"
"testing"
"time"
"github.com/Bose/minisentinel"
miniredis "github.com/alicebob/miniredis/v2"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/app/evaluator/defaulteval"
"open-match.dev/open-match/internal/app/minimatch"
"open-match.dev/open-match/internal/appmain/apptest"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
rpcTesting "open-match.dev/open-match/internal/rpc/testing"
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
"open-match.dev/open-match/internal/telemetry"
internalMmf "open-match.dev/open-match/internal/testing/mmf"
"open-match.dev/open-match/internal/util"
pb "open-match.dev/open-match/pkg/pb"
"open-match.dev/open-match/test/matchfunction/mmf"
mmfService "open-match.dev/open-match/internal/testing/mmf"
)
type inmemoryOM struct {
mainTc *rpcTesting.TestContext
mmfTc *rpcTesting.TestContext
evalTc *rpcTesting.TestContext
t *testing.T
mc *util.MultiClose
}
func (iom *inmemoryOM) withT(t *testing.T) OM {
evalTc := createEvaluatorForTest(t)
mainTc := createMinimatchForTest(t, evalTc)
mmfTc := createMatchFunctionForTest(t, mainTc)
om := &inmemoryOM{
mainTc: mainTc,
mmfTc: mmfTc,
evalTc: evalTc,
t: t,
mc: util.NewMultiClose(),
func start(t *testing.T, eval evaluator.Evaluator, mmf mmfService.MatchFunction) (config.View, func(time.Duration)) {
mredis := miniredis.NewMiniRedis()
err := mredis.StartAddr("localhost:0")
if err != nil {
t.Fatalf("failed to start miniredis, %v", err)
}
return om
}
t.Cleanup(mredis.Close)
func createZygote(m *testing.M) (OM, error) {
return &inmemoryOM{}, nil
}
func (iom *inmemoryOM) MustFrontendGRPC() pb.FrontendServiceClient {
conn := iom.mainTc.MustGRPC()
iom.mc.AddCloseWithErrorFunc(conn.Close)
return pb.NewFrontendServiceClient(conn)
}
func (iom *inmemoryOM) MustBackendGRPC() pb.BackendServiceClient {
conn := iom.mainTc.MustGRPC()
iom.mc.AddCloseWithErrorFunc(conn.Close)
return pb.NewBackendServiceClient(conn)
}
func (iom *inmemoryOM) MustQueryServiceGRPC() pb.QueryServiceClient {
conn := iom.mainTc.MustGRPC()
iom.mc.AddCloseWithErrorFunc(conn.Close)
return pb.NewQueryServiceClient(conn)
}
func (iom *inmemoryOM) MustMmfConfigGRPC() *pb.FunctionConfig {
return &pb.FunctionConfig{
Host: iom.mmfTc.GetHostname(),
Port: int32(iom.mmfTc.GetGRPCPort()),
Type: pb.FunctionConfig_GRPC,
msentinal := minisentinel.NewSentinel(mredis)
err = msentinal.StartAddr("localhost:0")
if err != nil {
t.Fatalf("failed to start minisentinel, %v", err)
}
}
t.Cleanup(msentinal.Close)
func (iom *inmemoryOM) MustMmfConfigHTTP() *pb.FunctionConfig {
return &pb.FunctionConfig{
Host: iom.mmfTc.GetHostname(),
Port: int32(iom.mmfTc.GetHTTPPort()),
Type: pb.FunctionConfig_REST,
grpcListener, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatal(err)
}
}
_, grpcPort, err := net.SplitHostPort(grpcListener.Addr().String())
if err != nil {
t.Fatal(err)
}
httpListener, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatal(err)
}
_, httpPort, err := net.SplitHostPort(httpListener.Addr().String())
if err != nil {
t.Fatal(err)
}
listeners := []net.Listener{grpcListener, httpListener}
func (iom *inmemoryOM) HealthCheck() error {
return nil
}
func (iom *inmemoryOM) Context() context.Context {
return iom.mainTc.Context()
}
func (iom *inmemoryOM) cleanup() {
iom.mc.Close()
iom.mainTc.Close()
iom.mmfTc.Close()
iom.evalTc.Close()
}
func (iom *inmemoryOM) cleanupMain() error {
return nil
}
// Create a minimatch test service with function bindings from frontendService, backendService, and queryService.
// Instruct this service to start and connect to a fake storage service.
func createMinimatchForTest(t *testing.T, evalTc *rpcTesting.TestContext) *rpcTesting.TestContext {
var closer func()
cfg := viper.New()
cfg.SetConfigType("yaml")
err = cfg.ReadConfig(strings.NewReader(configFile))
if err != nil {
t.Fatal(err)
}
// TODO: Use insecure for now since minimatch and mmf only works with the same secure mode
// Server a minimatch for testing using random port at tc.grpcAddress & tc.proxyAddress
tc := rpcTesting.MustServeInsecure(t, func(p *rpc.ServerParams) {
closer = statestoreTesting.New(t, cfg)
cfg.Set("storage.page.size", 10)
assert.Nil(t, minimatch.BindService(p, cfg))
})
// TODO: Revisit the Minimatch test setup in future milestone to simplify passing config
// values between components. The backend needs to connect to to the synchronizer but when
// it is initialized, does not know what port the synchronizer is on. To work around this,
// the backend sets up a connection to the synchronizer at runtime and hence can access these
// config values to establish the connection.
cfg.Set("api.synchronizer.hostname", tc.GetHostname())
cfg.Set("api.synchronizer.grpcport", tc.GetGRPCPort())
cfg.Set("api.synchronizer.httpport", tc.GetHTTPPort())
cfg.Set("synchronizer.registrationIntervalMs", "200ms")
cfg.Set("synchronizer.proposalCollectionIntervalMs", "200ms")
cfg.Set("api.evaluator.hostname", evalTc.GetHostname())
cfg.Set("api.evaluator.grpcport", evalTc.GetGRPCPort())
cfg.Set("api.evaluator.httpport", evalTc.GetHTTPPort())
cfg.Set("synchronizer.enabled", true)
cfg.Set("redis.sentinelHostname", msentinal.Host())
cfg.Set("redis.sentinelPort", msentinal.Port())
cfg.Set("redis.sentinelMaster", msentinal.MasterInfo().Name)
services := []string{apptest.ServiceName, "synchronizer", "backend", "frontend", "query", "evaluator"}
for _, name := range services {
cfg.Set("api."+name+".hostname", "localhost")
cfg.Set("api."+name+".grpcport", grpcPort)
cfg.Set("api."+name+".httpport", httpPort)
}
cfg.Set(rpc.ConfigNameEnableRPCLogging, *testOnlyEnableRPCLoggingFlag)
cfg.Set("logging.level", *testOnlyLoggingLevel)
cfg.Set(telemetry.ConfigNameEnableMetrics, *testOnlyEnableMetrics)
// TODO: This is very ugly. Need a better story around closing resources.
tc.AddCloseFunc(closer)
return tc
}
// Create a mmf service using a started test server.
// Inject the port config of queryService using that the passed in test server
func createMatchFunctionForTest(t *testing.T, c *rpcTesting.TestContext) *rpcTesting.TestContext {
// TODO: Use insecure for now since minimatch and mmf only works with the same secure mode
tc := rpcTesting.MustServeInsecure(t, func(p *rpc.ServerParams) {
cfg := viper.New()
// The below configuration is used by GRPC harness to create an queryService client to query tickets.
cfg.Set("api.query.hostname", c.GetHostname())
cfg.Set("api.query.grpcport", c.GetGRPCPort())
cfg.Set("api.query.httpport", c.GetHTTPPort())
assert.Nil(t, internalMmf.BindService(p, cfg, &internalMmf.FunctionSettings{
Func: mmf.MakeMatches,
}))
})
return tc
}
// Create an evaluator service that will be used by the minimatch tests.
func createEvaluatorForTest(t *testing.T) *rpcTesting.TestContext {
tc := rpcTesting.MustServeInsecure(t, func(p *rpc.ServerParams) {
cfg := viper.New()
assert.Nil(t, evaluator.BindService(p, cfg, defaulteval.Evaluate))
})
return tc
apptest.TestApp(t, cfg, listeners, minimatch.BindService, mmfService.BindServiceFor(mmf), evaluator.BindServiceFor(eval))
return cfg, mredis.FastForward
}

View File

@ -1,23 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"testing"
)
func TestMain(m *testing.M) {
RunMain(m)
}

View File

@ -1,5 +1,3 @@
// +build !e2ecluster
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
@ -25,17 +23,14 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/filter/testcases"
"open-match.dev/open-match/internal/testing/e2e"
"open-match.dev/open-match/pkg/pb"
)
func TestNoPool(t *testing.T) {
om := e2e.New(t)
q := om.MustQueryServiceGRPC()
om := newOM(t)
{
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: nil})
stream, err := om.Query().QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: nil})
require.Nil(t, err)
resp, err := stream.Recv()
@ -44,7 +39,7 @@ func TestNoPool(t *testing.T) {
}
{
stream, err := q.QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: nil})
stream, err := om.Query().QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: nil})
require.Nil(t, err)
resp, err := stream.Recv()
@ -54,12 +49,10 @@ func TestNoPool(t *testing.T) {
}
func TestNoTickets(t *testing.T) {
om := e2e.New(t)
q := om.MustQueryServiceGRPC()
om := newOM(t)
{
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
stream, err := om.Query().QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
require.Nil(t, err)
resp, err := stream.Recv()
@ -68,7 +61,7 @@ func TestNoTickets(t *testing.T) {
}
{
stream, err := q.QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: &pb.Pool{}})
stream, err := om.Query().QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: &pb.Pool{}})
require.Nil(t, err)
resp, err := stream.Recv()
@ -78,7 +71,7 @@ func TestNoTickets(t *testing.T) {
}
func TestPaging(t *testing.T) {
om := e2e.New(t)
om := newOM(t)
pageSize := 10 // TODO: read from config
if pageSize < 1 {
@ -88,17 +81,15 @@ func TestPaging(t *testing.T) {
totalTickets := pageSize*5 + 1
expectedIds := map[string]struct{}{}
fe := om.MustFrontendGRPC()
for i := 0; i < totalTickets; i++ {
resp, err := fe.CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
resp, err := om.Frontend().CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
require.NotNil(t, resp)
require.Nil(t, err)
expectedIds[resp.Id] = struct{}{}
}
q := om.MustQueryServiceGRPC()
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
stream, err := om.Query().QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
require.Nil(t, err)
foundIds := map[string]struct{}{}
@ -107,7 +98,7 @@ func TestPaging(t *testing.T) {
var resp *pb.QueryTicketsResponse
resp, err = stream.Recv()
require.Nil(t, err)
require.Equal(t, len(resp.Tickets), pageSize)
require.Equal(t, pageSize, len(resp.Tickets))
for _, ticket := range resp.Tickets {
foundIds[ticket.Id] = struct{}{}
@ -129,10 +120,12 @@ func TestPaging(t *testing.T) {
func TestTicketFound(t *testing.T) {
for _, tc := range testcases.IncludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Run("QueryTickets_"+tc.Name, func(t *testing.T) {
if !returnedByQuery(t, tc) {
require.Fail(t, "Expected to find ticket in pool but didn't.")
}
})
t.Run("QueryTicketIds_"+tc.Name, func(t *testing.T) {
if !returnedByQueryID(t, tc) {
require.Fail(t, "Expected to find id in pool but didn't.")
}
@ -143,10 +136,12 @@ func TestTicketFound(t *testing.T) {
func TestTicketNotFound(t *testing.T) {
for _, tc := range testcases.ExcludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Run("QueryTickets_"+tc.Name, func(t *testing.T) {
if returnedByQuery(t, tc) {
require.Fail(t, "Expected to not find ticket in pool but did.")
}
})
t.Run("QueryTicketIds_"+tc.Name, func(t *testing.T) {
if returnedByQueryID(t, tc) {
require.Fail(t, "Expected to not find id in pool but did.")
}
@ -155,17 +150,15 @@ func TestTicketNotFound(t *testing.T) {
}
func returnedByQuery(t *testing.T, tc testcases.TestCase) (found bool) {
om := e2e.New(t)
om := newOM(t)
{
fe := om.MustFrontendGRPC()
resp, err := fe.CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
resp, err := om.Frontend().CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
require.NotNil(t, resp)
require.Nil(t, err)
}
q := om.MustQueryServiceGRPC()
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: tc.Pool})
stream, err := om.Query().QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: tc.Pool})
require.Nil(t, err)
tickets := []*pb.Ticket{}
@ -187,17 +180,15 @@ func returnedByQuery(t *testing.T, tc testcases.TestCase) (found bool) {
}
func returnedByQueryID(t *testing.T, tc testcases.TestCase) (found bool) {
om := e2e.New(t)
om := newOM(t)
{
fe := om.MustFrontendGRPC()
resp, err := fe.CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
resp, err := om.Frontend().CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
require.NotNil(t, resp)
require.Nil(t, err)
}
q := om.MustQueryServiceGRPC()
stream, err := q.QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: tc.Pool})
stream, err := om.Query().QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: tc.Pool})
require.Nil(t, err)
ids := []string{}

View File

@ -0,0 +1,600 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"io"
"testing"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/pkg/pb"
)
// TestAssignTickets covers assigning multiple tickets, using two different
// assignment groups.
func TestAssignTickets(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	// Create three tickets to distribute across two assignment groups.
	tickets := make([]*pb.Ticket, 3)
	for i := range tickets {
		ticket, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
		require.Nil(t, err)
		tickets[i] = ticket
	}

	resp, err := om.Backend().AssignTickets(ctx, &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{
			{
				TicketIds:  []string{tickets[0].Id},
				Assignment: &pb.Assignment{Connection: "a"},
			},
			{
				TicketIds:  []string{tickets[1].Id, tickets[2].Id},
				Assignment: &pb.Assignment{Connection: "b"},
			},
		},
	})
	require.Nil(t, err)
	require.Equal(t, &pb.AssignTicketsResponse{}, resp)

	// Each ticket should report the connection of its assignment group.
	for i, want := range []string{"a", "b", "b"} {
		get, err := om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: tickets[i].Id})
		require.Nil(t, err)
		require.Equal(t, want, get.Assignment.Connection)
	}
}
// TestAssignTicketsInvalidArgument covers various invalid calls to assign
// tickets.
func TestAssignTicketsInvalidArgument(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	// One real ticket to reference from the invalid requests.
	ctResp, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	for _, tt := range []struct {
		name string // sub-test name
		req  *pb.AssignTicketsRequest
		msg  string // expected InvalidArgument status message
	}{
		{
			// A group without an Assignment value is rejected outright.
			"missing assignment",
			&pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{},
				},
			},
			"AssignmentGroup.Assignment is required",
		},
		{
			// The same ticket id may not appear twice within one group.
			"ticket used twice one group",
			&pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{ctResp.Id, ctResp.Id},
						Assignment: &pb.Assignment{},
					},
				},
			},
			"Ticket id " + ctResp.Id + " is assigned multiple times in one assign tickets call.",
		},
		{
			// Nor may it appear in two different groups of the same call.
			"ticket used twice two groups",
			&pb.AssignTicketsRequest{
				Assignments: []*pb.AssignmentGroup{
					{
						TicketIds:  []string{ctResp.Id},
						Assignment: &pb.Assignment{Connection: "a"},
					},
					{
						TicketIds:  []string{ctResp.Id},
						Assignment: &pb.Assignment{Connection: "b"},
					},
				},
			},
			"Ticket id " + ctResp.Id + " is assigned multiple times in one assign tickets call.",
		},
	} {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			_, err := om.Backend().AssignTickets(ctx, tt.req)
			require.Equal(t, codes.InvalidArgument, status.Convert(err).Code())
			require.Equal(t, tt.msg, status.Convert(err).Message())
		})
	}
}
// TestAssignTicketsMissingTicket covers that when a ticket was deleted before
// being assigned, the assign tickets calls succeeds, however it returns a
// notice that the ticket was missing.
func TestAssignTicketsMissingTicket(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	create := func() *pb.Ticket {
		ticket, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
		require.Nil(t, err)
		return ticket
	}
	t1, t2, t3 := create(), create(), create()

	// Remove the middle ticket before the assignment is attempted.
	_, err := om.Frontend().DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: t2.Id})
	require.Nil(t, err)

	resp, err := om.Backend().AssignTickets(ctx, &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{
			{
				TicketIds:  []string{t1.Id, t2.Id, t3.Id},
				Assignment: &pb.Assignment{Connection: "a"},
			},
		},
	})
	require.Nil(t, err)

	// The call succeeds overall, but the deleted ticket is reported missing.
	require.Equal(t, &pb.AssignTicketsResponse{
		Failures: []*pb.AssignmentFailure{
			{
				TicketId: t2.Id,
				Cause:    pb.AssignmentFailure_TICKET_NOT_FOUND,
			},
		},
	}, resp)
}
// TestTicketDelete covers that a deleted ticket can no longer be fetched, and
// that the frontend reports NotFound for it.
func TestTicketDelete(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	ticket, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	_, err = om.Frontend().DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticket.Id})
	require.Nil(t, err)

	got, err := om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: ticket.Id})
	require.Nil(t, got)
	st := status.Convert(err)
	require.Equal(t, "Ticket id:"+ticket.Id+" not found", st.Message())
	require.Equal(t, codes.NotFound, st.Code())
}
// TestEmptyReleaseTicketsRequest covers that it is valid to not have any
// ticket ids when releasing tickets. (though it's not really doing
// anything...)
func TestEmptyReleaseTicketsRequest(t *testing.T) {
	om := newOM(t)

	// A nil id list is accepted and yields an empty, error-free response.
	resp, err := om.Backend().ReleaseTickets(context.Background(), &pb.ReleaseTicketsRequest{
		TicketIds: nil,
	})
	require.Nil(t, err)
	require.Equal(t, &pb.ReleaseTicketsResponse{}, resp)
}
// TestReleaseTickets covers that tickets returned from matches are no longer
// returned by query tickets, but will return after being released.
func TestReleaseTickets(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	var ticket *pb.Ticket

	{ // Create ticket
		var err error
		ticket, err = om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
		require.Nil(t, err)
		require.NotEmpty(t, ticket.Id)
	}

	{ // Ticket present in query
		stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Tickets[0].Id)

		// The stream ends (io.EOF) after the single page of results.
		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	var matchReturnedAt time.Time

	{ // Ticket returned from match
		// MMF proposes exactly one match containing the ticket.
		om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
			out <- &pb.Match{
				MatchId: "1",
				Tickets: []*pb.Ticket{ticket},
			}
			return nil
		})

		om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
			m := <-in
			// Only the single match from the MMF above is expected.
			_, ok := <-in
			require.False(t, ok)
			// Record when the match was accepted so the end of the test can
			// verify that the pending release timeout has not yet elapsed.
			matchReturnedAt = time.Now()
			out <- m.MatchId
			return nil
		})

		stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
			Config: om.MMFConfigGRPC(),
			Profile: &pb.MatchProfile{
				Name: "test-profile",
				Pools: []*pb.Pool{
					{Name: "pool"},
				},
			},
		})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Match.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Match.Tickets[0].Id)

		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	{ // Ticket NOT present in query
		// Being returned in a fetched match hides the ticket from queries.
		stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	{ // Return ticket
		resp, err := om.Backend().ReleaseTickets(ctx, &pb.ReleaseTicketsRequest{
			TicketIds: []string{ticket.Id},
		})
		require.Nil(t, err)
		require.Equal(t, &pb.ReleaseTicketsResponse{}, resp)
	}

	{ // Ticket present in query
		// Releasing the ticket makes it visible to queries again.
		stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Tickets[0].Id)

		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	// Ensure that the release timeout did NOT have enough time to affect this
	// test.
	require.True(t, time.Since(matchReturnedAt) < pendingReleaseTimeout, "%s", time.Since(matchReturnedAt))
}
// TestReleaseAllTickets covers that tickets are released and returned by
// query after calling ReleaseAllTickets. The pre-fetch query visibility check
// is omitted here, as TestReleaseTickets already covers it.
func TestReleaseAllTickets(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	var ticket *pb.Ticket

	{ // Create ticket
		var err error
		ticket, err = om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
		require.Nil(t, err)
		require.NotEmpty(t, ticket.Id)
	}

	var matchReturnedAt time.Time

	{ // Ticket returned from match
		// MMF proposes exactly one match containing the ticket.
		om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
			out <- &pb.Match{
				MatchId: "1",
				Tickets: []*pb.Ticket{ticket},
			}
			return nil
		})

		om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
			m := <-in
			// Only the single match from the MMF above is expected.
			_, ok := <-in
			require.False(t, ok)
			// Record when the match was accepted so the end of the test can
			// verify that the pending release timeout has not yet elapsed.
			matchReturnedAt = time.Now()
			out <- m.MatchId
			return nil
		})

		stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
			Config: om.MMFConfigGRPC(),
			Profile: &pb.MatchProfile{
				Name: "test-profile",
				Pools: []*pb.Pool{
					{Name: "pool"},
				},
			},
		})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Match.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Match.Tickets[0].Id)

		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	{ // Return ticket
		// Unlike TestReleaseTickets, release everything rather than naming ids.
		resp, err := om.Backend().ReleaseAllTickets(ctx, &pb.ReleaseAllTicketsRequest{})
		require.Nil(t, err)
		require.Equal(t, &pb.ReleaseAllTicketsResponse{}, resp)
	}

	{ // Ticket present in query
		// Releasing all tickets makes the ticket visible to queries again.
		stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Tickets[0].Id)

		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	// Ensure that the release timeout did NOT have enough time to affect this
	// test.
	require.True(t, time.Since(matchReturnedAt) < pendingReleaseTimeout, "%s", time.Since(matchReturnedAt))
}
// TestTicketReleaseByTimeout covers that tickets are released after a time if
// returned by a match but not assigned.
func TestTicketReleaseByTimeout(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	var ticket *pb.Ticket

	{ // Create ticket
		var err error
		ticket, err = om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
		require.Nil(t, err)
		require.NotEmpty(t, ticket.Id)
	}

	{ // Ticket present in query
		stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Tickets[0].Id)

		// The stream ends (io.EOF) after the single page of results.
		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	{ // Ticket returned from match
		// MMF proposes exactly one match containing the ticket.
		om.SetMMF(func(ctx context.Context, profile *pb.MatchProfile, out chan<- *pb.Match) error {
			out <- &pb.Match{
				MatchId: "1",
				Tickets: []*pb.Ticket{ticket},
			}
			return nil
		})

		om.SetEvaluator(func(ctx context.Context, in <-chan *pb.Match, out chan<- string) error {
			m := <-in
			// Only the single match from the MMF above is expected.
			_, ok := <-in
			require.False(t, ok)
			out <- m.MatchId
			return nil
		})

		stream, err := om.Backend().FetchMatches(ctx, &pb.FetchMatchesRequest{
			Config: om.MMFConfigGRPC(),
			Profile: &pb.MatchProfile{
				Name: "test-profile",
				Pools: []*pb.Pool{
					{Name: "pool"},
				},
			},
		})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Match.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Match.Tickets[0].Id)

		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	{ // Ticket NOT present in query
		// Being returned in a fetched match hides the ticket from queries.
		stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}

	{ // Return ticket
		// No explicit release call: wait out the pending release timeout, after
		// which the ticket should become visible again on its own.
		time.Sleep(pendingReleaseTimeout)
	}

	{ // Ticket present in query
		stream, err := om.Query().QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)

		resp, err := stream.Recv()
		require.Nil(t, err)
		require.Len(t, resp.Tickets, 1)
		require.Equal(t, ticket.Id, resp.Tickets[0].Id)

		resp, err = stream.Recv()
		require.Equal(t, io.EOF, err)
		require.Nil(t, resp)
	}
}
// TestCreateTicketErrors covers invalid arguments when calling create ticket.
func TestCreateTicketErrors(t *testing.T) {
	for _, tt := range []struct {
		name string
		req  *pb.CreateTicketRequest
		msg  string
	}{
		{
			"missing ticket",
			&pb.CreateTicketRequest{
				Ticket: nil,
			},
			".ticket is required",
		},
		{
			"already has assignment",
			&pb.CreateTicketRequest{
				Ticket: &pb.Ticket{
					Assignment: &pb.Assignment{},
				},
			},
			"tickets cannot be created with an assignment",
		},
		{
			"already has create time",
			&pb.CreateTicketRequest{
				Ticket: &pb.Ticket{
					CreateTime: ptypes.TimestampNow(),
				},
			},
			"tickets cannot be created with create time set",
		},
	} {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			om := newOM(t)
			ctx := context.Background()

			resp, err := om.Frontend().CreateTicket(ctx, tt.req)
			require.Nil(t, resp)

			s := status.Convert(err)
			require.Equal(t, codes.InvalidArgument, s.Code())
			// Expected value first: require.Equal's signature is
			// (t, expected, actual), matching the sibling tests in this file.
			require.Equal(t, tt.msg, s.Message())
		})
	}
}
// TestAssignedTicketsNotReturnedByQuery covers that when a ticket has been
// assigned, it will no longer be returned by query.
func TestAssignedTicketsNotReturnedByQuery(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	// Reports whether the query service currently returns any tickets.
	queryHasTickets := func() bool {
		stream, err := om.Query().QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
		require.Nil(t, err)
		_, err = stream.Recv()
		return err != io.EOF
	}

	ticket, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)
	require.True(t, queryHasTickets())

	resp, err := om.Backend().AssignTickets(ctx, &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{
			{
				TicketIds:  []string{ticket.Id},
				Assignment: &pb.Assignment{Connection: "a"},
			},
		},
	})
	require.Nil(t, err)
	require.Equal(t, &pb.AssignTicketsResponse{}, resp)

	// After assignment the ticket no longer shows up in query results.
	require.False(t, queryHasTickets())
}
// TestAssignedTicketDeleteTimeout covers assigned tickets being deleted after
// a timeout.
func TestAssignedTicketDeleteTimeout(t *testing.T) {
	om := newOM(t)
	ctx := context.Background()

	ticket, err := om.Frontend().CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	require.Nil(t, err)

	assignResp, err := om.Backend().AssignTickets(ctx, &pb.AssignTicketsRequest{
		Assignments: []*pb.AssignmentGroup{
			{
				TicketIds:  []string{ticket.Id},
				Assignment: &pb.Assignment{Connection: "a"},
			},
		},
	})
	require.Nil(t, err)
	require.Equal(t, &pb.AssignTicketsResponse{}, assignResp)

	// Immediately after assignment the ticket is still readable and carries
	// the assignment connection.
	found, err := om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: ticket.Id})
	require.Nil(t, err)
	require.Equal(t, "a", found.Assignment.Connection)

	// Once the assigned TTL elapses the ticket is gone.
	om.AdvanceTTLTime(assignedDeleteTimeout)
	found, err = om.Frontend().GetTicket(ctx, &pb.GetTicketRequest{TicketId: ticket.Id})
	require.Nil(t, found)
	require.Equal(t, codes.NotFound, status.Convert(err).Code())
}

View File

@ -16,49 +16,22 @@
package mmf
import (
"github.com/spf13/viper"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/app"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/appmain"
"open-match.dev/open-match/pkg/pb"
)
// FunctionSettings is a collection of parameters used to customize matchfunction views.
type FunctionSettings struct {
Func MatchFunction
}
// BindServiceFor creates the match function service and binds it to the serving harness.
func BindServiceFor(mmf MatchFunction) appmain.Bind {
return func(p *appmain.Params, b *appmain.Bindings) error {
service := &matchFunctionService{
mmf: mmf,
}
// RunMatchFunction is a hook for the main() method in the main executable.
func RunMatchFunction(settings *FunctionSettings) {
app.RunApplication("functions", getCfg, func(p *rpc.ServerParams, cfg config.View) error {
return BindService(p, cfg, settings)
})
}
b.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterMatchFunctionServer(s, service)
}, pb.RegisterMatchFunctionHandlerFromEndpoint)
// BindService creates the function service to the server Params.
func BindService(p *rpc.ServerParams, cfg config.View, fs *FunctionSettings) error {
service, err := newMatchFunctionService(cfg, fs)
if err != nil {
return err
return nil
}
p.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterMatchFunctionServer(s, service)
}, pb.RegisterMatchFunctionHandlerFromEndpoint)
return nil
}
func getCfg() (config.View, error) {
cfg := viper.New()
cfg.Set("api.functions.hostname", "om-function")
cfg.Set("api.functions.grpcport", 50502)
cfg.Set("api.functions.httpport", 51502)
cfg.Set("api.query.hostname", "om-query")
cfg.Set("api.query.grpcport", 50503)
return cfg, nil
}

Some files were not shown because too many files have changed in this diff Show More