Compare commits


2 Commits

SHA1        Message                                                                Date
5170341b3e  Disabled redis when generating static yaml resources except core ()    2020-02-03 18:32:57 -08:00
              * Disabled redis when generating static yaml resources except core
              * mitigate cloudbuild error
1dbbfd9326  Release 0.9 ()                                                          2020-02-03 16:28:18 -08:00
179 changed files with 3334 additions and 4375 deletions
.golangci.yaml
Dockerfile.base-build
Dockerfile.ci
Makefile
api
cloudbuild.yaml
examples
go.mod
go.sum
install
internal
pkg
test
third_party
tools
tutorials
custom_evaluator
director
evaluator
frontend
matchfunction
solution
default_evaluator
matchmaker101
matchmaker102

@ -171,10 +171,17 @@ linters:
- funlen
- gochecknoglobals
- goconst
- gocritic
- gocyclo
- gofmt
- goimports
- gosec
- interfacer # deprecated - "A tool that suggests interfaces is prone to bad suggestions"
- lll
- prealloc
- scopelint
- staticcheck
- stylecheck
#linters:
# enable-all: true

@ -13,7 +13,7 @@
# limitations under the License.
# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
FROM golang:1.14.0
FROM golang:1.13.4
ENV GO111MODULE=on
WORKDIR /go/src/open-match.dev/open-match

@ -34,13 +34,13 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-stretch" && \
apt-get update -y && apt-get install google-cloud-sdk google-cloud-sdk-app-engine-go -y -qq
# Install Golang
# https://github.com/docker-library/golang/blob/master/1.14/stretch/Dockerfile
# https://github.com/docker-library/golang/blob/master/1.13/stretch/Dockerfile
RUN mkdir -p /toolchain/golang
WORKDIR /toolchain/golang
RUN sudo rm -rf /usr/local/go/
# When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
RUN curl -L https://golang.org/dl/go1.14.linux-amd64.tar.gz | sudo tar -C /usr/local -xz
RUN curl -L https://golang.org/dl/go1.13.4.linux-amd64.tar.gz | sudo tar -C /usr/local -xz
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH

@ -52,7 +52,7 @@
# If you want information on how to edit this file checkout,
# http://makefiletutorial.com/
BASE_VERSION = 0.10.0
BASE_VERSION = 0.9.0
SHORT_SHA = $(shell git rev-parse --short=7 HEAD | tr -d [:punct:])
BRANCH_NAME = $(shell git rev-parse --abbrev-ref HEAD | tr -d [:punct:])
VERSION = $(BASE_VERSION)-$(SHORT_SHA)
@ -196,7 +196,7 @@ ALL_PROTOS = $(GOLANG_PROTOS) $(SWAGGER_JSON_DOCS)
CMDS = $(notdir $(wildcard cmd/*))
# Names of the individual images, ommiting the openmatch prefix.
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-pool base-build
IMAGES = $(CMDS) mmf-go-soloduel mmf-go-pool evaluator-go-simple base-build
help:
@cat Makefile | grep ^\#\# | grep -v ^\#\#\# |cut -c 4-
@ -239,6 +239,9 @@ build-mmf-go-soloduel-image: docker build-base-build-image
build-mmf-go-pool-image: docker build-base-build-image
docker build -f test/matchfunction/Dockerfile -t $(REGISTRY)/openmatch-mmf-go-pool:$(TAG) -t $(REGISTRY)/openmatch-mmf-go-pool:$(ALTERNATE_TAG) .
build-evaluator-go-simple-image: docker build-base-build-image
docker build -f test/evaluator/Dockerfile -t $(REGISTRY)/openmatch-evaluator-go-simple:$(TAG) -t $(REGISTRY)/openmatch-evaluator-go-simple:$(ALTERNATE_TAG) .
#######################################
## push-images / push-<image name>-image: builds and pushes images to your
## container registry.
@ -726,8 +729,9 @@ service-binaries: cmd/minimatch/minimatch$(EXE_EXTENSION) cmd/swaggerui/swaggeru
service-binaries: cmd/backend/backend$(EXE_EXTENSION) cmd/frontend/frontend$(EXE_EXTENSION)
service-binaries: cmd/query/query$(EXE_EXTENSION) cmd/synchronizer/synchronizer$(EXE_EXTENSION)
example-binaries: example-mmf-binaries
example-binaries: example-mmf-binaries example-evaluator-binaries
example-mmf-binaries: examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION)
example-evaluator-binaries: test/evaluator/evaluator$(EXE_EXTENSION)
examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cd $(REPOSITORY_ROOT)/examples/functions/golang/soloduel; $(GO_BUILD_COMMAND)
@ -735,6 +739,9 @@ examples/functions/golang/soloduel/soloduel$(EXE_EXTENSION): pkg/pb/query.pb.go
test/matchfunction/matchfunction$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json pkg/pb/matchfunction.pb.go pkg/pb/matchfunction.pb.gw.go api/matchfunction.swagger.json
cd $(REPOSITORY_ROOT)/test/matchfunction; $(GO_BUILD_COMMAND)
test/evaluator/evaluator$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
cd $(REPOSITORY_ROOT)/test/evaluator; $(GO_BUILD_COMMAND)
tools-binaries: tools/certgen/certgen$(EXE_EXTENSION) tools/reaper/reaper$(EXE_EXTENSION)
cmd/backend/backend$(EXE_EXTENSION): pkg/pb/backend.pb.go pkg/pb/backend.pb.gw.go api/backend.swagger.json
@ -746,9 +753,6 @@ cmd/frontend/frontend$(EXE_EXTENSION): pkg/pb/frontend.pb.go pkg/pb/frontend.pb.
cmd/query/query$(EXE_EXTENSION): pkg/pb/query.pb.go pkg/pb/query.pb.gw.go api/query.swagger.json
cd $(REPOSITORY_ROOT)/cmd/query; $(GO_BUILD_COMMAND)
cmd/default-evaluator/default-evaluator$(EXE_EXTENSION): pkg/pb/evaluator.pb.go pkg/pb/evaluator.pb.gw.go api/evaluator.swagger.json
cd $(REPOSITORY_ROOT)/cmd/evaluator; $(GO_BUILD_COMMAND)
cmd/synchronizer/synchronizer$(EXE_EXTENSION): internal/ipb/synchronizer.pb.go
cd $(REPOSITORY_ROOT)/cmd/synchronizer; $(GO_BUILD_COMMAND)

@ -88,8 +88,7 @@ message ReleaseTicketsRequest{
message ReleaseTicketsResponse {}
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
message AssignmentGroup{
message AssignTicketsRequest {
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
repeated string ticket_ids = 1;
@ -97,26 +96,7 @@ message AssignmentGroup{
Assignment assignment = 2;
}
// AssignmentFailure contains the id of the Ticket that failed the Assignment and the failure status.
message AssignmentFailure {
enum Cause {
UNKNOWN = 0;
TICKET_NOT_FOUND = 1;
}
string ticket_id = 1;
Cause cause = 2;
}
message AssignTicketsRequest {
// Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied.
repeated AssignmentGroup assignments = 1;
}
message AssignTicketsResponse {
// Failures is a list of all the Tickets that failed assignment along with the cause of failure.
repeated AssignmentFailure failures = 1;
}
message AssignTicketsResponse {}
// The BackendService implements APIs to generate matches and handle ticket assignments.
service BackendService {

@ -129,37 +129,24 @@
}
},
"definitions": {
"AssignmentFailureCause": {
"type": "string",
"enum": [
"UNKNOWN",
"TICKET_NOT_FOUND"
],
"default": "UNKNOWN"
},
"openmatchAssignTicketsRequest": {
"type": "object",
"properties": {
"assignments": {
"ticket_ids": {
"type": "array",
"items": {
"$ref": "#/definitions/openmatchAssignmentGroup"
"type": "string"
},
"description": "Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied."
"description": "TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment."
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment specifies game connection related information to be associated with the TicketIds."
}
}
},
"openmatchAssignTicketsResponse": {
"type": "object",
"properties": {
"failures": {
"type": "array",
"items": {
"$ref": "#/definitions/openmatchAssignmentFailure"
},
"description": "Failures is a list of all the Tickets that failed assignment along with the cause of failure."
}
}
"type": "object"
},
"openmatchAssignment": {
"type": "object",
@ -178,35 +165,6 @@
},
"description": "An Assignment represents a game server assignment associated with a Ticket. Open\nmatch does not require or inspect any fields on assignment."
},
"openmatchAssignmentFailure": {
"type": "object",
"properties": {
"ticket_id": {
"type": "string"
},
"cause": {
"$ref": "#/definitions/AssignmentFailureCause"
}
},
"description": "AssignmentFailure contains the id of the Ticket that failed the Assignment and the failure status."
},
"openmatchAssignmentGroup": {
"type": "object",
"properties": {
"ticket_ids": {
"type": "array",
"items": {
"type": "string"
},
"description": "TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment."
},
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An Assignment specifies game connection related information to be associated with the TicketIds."
}
},
"description": "AssignmentGroup contains an Assignment and the Tickets to which it should be applied."
},
"openmatchDoubleRangeFilter": {
"type": "object",
"properties": {
@ -341,7 +299,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@ -354,19 +312,8 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
}
},
"openmatchReleaseTicketsRequest": {
"type": "object",
@ -454,11 +401,6 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."

@ -177,11 +177,6 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."

@ -20,7 +20,6 @@ option csharp_namespace = "OpenMatch";
import "api/messages.proto";
import "google/api/annotations.proto";
import "protoc-gen-swagger/options/annotations.proto";
import "google/protobuf/empty.proto";
option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
info: {
@ -61,22 +60,29 @@ message CreateTicketRequest {
Ticket ticket = 1;
}
message CreateTicketResponse {
// A Ticket object with TicketId generated.
Ticket ticket = 1;
}
message DeleteTicketRequest {
// A TicketId of a generated Ticket to be deleted.
string ticket_id = 1;
}
message DeleteTicketResponse {}
message GetTicketRequest {
// A TicketId of a generated Ticket.
string ticket_id = 1;
}
message WatchAssignmentsRequest {
message GetAssignmentsRequest {
// A TicketId of a generated Ticket to get updates on.
string ticket_id = 1;
}
message WatchAssignmentsResponse {
message GetAssignmentsResponse {
// An updated Assignment of the requested Ticket.
Assignment assignment = 1;
}
@ -87,7 +93,7 @@ service FrontendService {
// A ticket is considered as ready for matchmaking once it is created.
// - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
// - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.
rpc CreateTicket(CreateTicketRequest) returns (Ticket) {
rpc CreateTicket(CreateTicketRequest) returns (CreateTicketResponse) {
option (google.api.http) = {
post: "/v1/frontendservice/tickets"
body: "*"
@ -98,7 +104,7 @@ service FrontendService {
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
rpc DeleteTicket(DeleteTicketRequest) returns (google.protobuf.Empty) {
rpc DeleteTicket(DeleteTicketRequest) returns (DeleteTicketResponse) {
option (google.api.http) = {
delete: "/v1/frontendservice/tickets/{ticket_id}"
};
@ -111,10 +117,10 @@ service FrontendService {
};
}
// WatchAssignments stream back Assignment of the specified TicketId if it is updated.
// GetAssignments stream back Assignment of the specified TicketId if it is updated.
// - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
rpc WatchAssignments(WatchAssignmentsRequest)
returns (stream WatchAssignmentsResponse) {
rpc GetAssignments(GetAssignmentsRequest)
returns (stream GetAssignmentsResponse) {
option (google.api.http) = {
get: "/v1/frontendservice/tickets/{ticket_id}/assignments"
};

@ -32,7 +32,7 @@
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/openmatchTicket"
"$ref": "#/definitions/openmatchCreateTicketResponse"
}
},
"404": {
@ -97,7 +97,7 @@
"200": {
"description": "A successful response.",
"schema": {
"properties": {}
"$ref": "#/definitions/openmatchDeleteTicketResponse"
}
},
"404": {
@ -124,13 +124,13 @@
},
"/v1/frontendservice/tickets/{ticket_id}/assignments": {
"get": {
"summary": "WatchAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "WatchAssignments",
"summary": "GetAssignments stream back Assignment of the specified TicketId if it is updated.\n - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.",
"operationId": "GetAssignments",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchWatchAssignmentsResponse"
"$ref": "#/x-stream-definitions/openmatchGetAssignmentsResponse"
}
},
"404": {
@ -183,6 +183,27 @@
}
}
},
"openmatchCreateTicketResponse": {
"type": "object",
"properties": {
"ticket": {
"$ref": "#/definitions/openmatchTicket",
"description": "A Ticket object with TicketId generated."
}
}
},
"openmatchDeleteTicketResponse": {
"type": "object"
},
"openmatchGetAssignmentsResponse": {
"type": "object",
"properties": {
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Ticket."
}
}
},
"openmatchSearchFields": {
"type": "object",
"properties": {
@ -232,24 +253,10 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
},
"openmatchWatchAssignmentsResponse": {
"type": "object",
"properties": {
"assignment": {
"$ref": "#/definitions/openmatchAssignment",
"description": "An updated Assignment of the requested Ticket."
}
}
},
"protobufAny": {
"type": "object",
"properties": {
@ -292,17 +299,17 @@
}
},
"x-stream-definitions": {
"openmatchWatchAssignmentsResponse": {
"openmatchGetAssignmentsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchWatchAssignmentsResponse"
"$ref": "#/definitions/openmatchGetAssignmentsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchWatchAssignmentsResponse"
"title": "Stream result of openmatchGetAssignmentsResponse"
}
},
"externalDocs": {

@ -165,7 +165,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@ -178,19 +178,8 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
}
},
"openmatchRunRequest": {
"type": "object",
@ -281,11 +270,6 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."

@ -19,7 +19,6 @@ option csharp_namespace = "OpenMatch";
import "google/rpc/status.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
// A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an
// individual 'Player' or a 'Group' of players. Open Match will not interpret
@ -43,10 +42,6 @@ message Ticket {
// Optional, depending on the requirements of the connected systems.
map<string, google.protobuf.Any> extensions = 5;
// Create time represents the time at which this Ticket was created. It is
// populated by Open Match at the time of Ticket creation.
google.protobuf.Timestamp create_time = 6;
// Deprecated fields.
reserved 2;
}
@ -131,13 +126,11 @@ message TagPresentFilter {
string tag = 1;
}
// Pool specfies a set of criteria that are used to select a subset of Tickets
// that meet all the criteria.
message Pool {
// A developer-chosen human-readable name for this Pool.
string name = 1;
// Set of Filters indicating the filtering criteria. Selected tickets must
// Set of Filters indicating the filtering criteria. Selected players must
// match every Filter.
repeated DoubleRangeFilter double_range_filters = 2;
@ -145,12 +138,6 @@ message Pool {
repeated TagPresentFilter tag_present_filters = 5;
// If specified, only Tickets created before the specified time are selected.
google.protobuf.Timestamp created_before = 6;
// If specified, only Tickets created after the specified time are selected.
google.protobuf.Timestamp created_after = 7;
// Deprecated fields.
reserved 3;
}

@ -56,46 +56,25 @@ option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = {
};
message QueryTicketsRequest {
// The Pool representing the set of Filters to be queried.
// A Pool is consists of a set of Filters.
Pool pool = 1;
}
message QueryTicketsResponse {
// Tickets that meet all the filtering criteria requested by the pool.
// Tickets that satisfy all the filtering criteria.
repeated Ticket tickets = 1;
}
message QueryTicketIdsRequest {
// The Pool representing the set of Filters to be queried.
Pool pool = 1;
}
message QueryTicketIdsResponse {
// TicketIDs that meet all the filtering criteria requested by the pool.
repeated string ids = 1;
}
// The QueryService service implements helper APIs for Match Function to query Tickets from state storage.
service QueryService {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `storage.pool.size` and stream back responses.
// - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
// QueryTickets pages the Tickets by `storage.pool.size` and stream back response.
// - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000
rpc QueryTickets(QueryTicketsRequest) returns (stream QueryTicketsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/tickets:query"
body: "*"
};
}
// QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
// - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
// QueryTicketIds pages the TicketIDs by `storage.pool.size` and stream back responses.
// - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.
rpc QueryTicketIds(QueryTicketIdsRequest) returns (stream QueryTicketIdsResponse) {
option (google.api.http) = {
post: "/v1/queryservice/ticketids:query"
body: "*"
};
}
}

@ -24,43 +24,9 @@
"application/json"
],
"paths": {
"/v1/queryservice/ticketids:query": {
"post": {
"summary": "QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.\n - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.\nQueryTicketIds pages the TicketIDs by `storage.pool.size` and stream back responses.\n - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"operationId": "QueryTicketIds",
"responses": {
"200": {
"description": "A successful response.(streaming responses)",
"schema": {
"$ref": "#/x-stream-definitions/openmatchQueryTicketIdsResponse"
}
},
"404": {
"description": "Returned when the resource does not exist.",
"schema": {
"type": "string",
"format": "string"
}
}
},
"parameters": [
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/openmatchQueryTicketIdsRequest"
}
}
],
"tags": [
"QueryService"
]
}
},
"/v1/queryservice/tickets:query": {
"post": {
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `storage.pool.size` and stream back responses.\n - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000.",
"summary": "QueryTickets gets a list of Tickets that match all Filters of the input Pool.\n - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.\nQueryTickets pages the Tickets by `storage.pool.size` and stream back response.\n - storage.pool.size is default to 1000 if not set, and has a mininum of 10 and maximum of 10000",
"operationId": "QueryTickets",
"responses": {
"200": {
@ -143,7 +109,7 @@
"items": {
"$ref": "#/definitions/openmatchDoubleRangeFilter"
},
"description": "Set of Filters indicating the filtering criteria. Selected tickets must\nmatch every Filter."
"description": "Set of Filters indicating the filtering criteria. Selected players must\nmatch every Filter."
},
"string_equals_filters": {
"type": "array",
@ -156,38 +122,6 @@
"items": {
"$ref": "#/definitions/openmatchTagPresentFilter"
}
},
"created_before": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created before the specified time are selected."
},
"created_after": {
"type": "string",
"format": "date-time",
"description": "If specified, only Tickets created after the specified time are selected."
}
},
"description": "Pool specfies a set of criteria that are used to select a subset of Tickets\nthat meet all the criteria."
},
"openmatchQueryTicketIdsRequest": {
"type": "object",
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
}
}
},
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"ids": {
"type": "array",
"items": {
"type": "string"
},
"description": "TicketIDs that meet all the filtering criteria requested by the pool."
}
}
},
@ -196,7 +130,7 @@
"properties": {
"pool": {
"$ref": "#/definitions/openmatchPool",
"description": "The Pool representing the set of Filters to be queried."
"description": "A Pool is consists of a set of Filters."
}
}
},
@ -208,7 +142,7 @@
"items": {
"$ref": "#/definitions/openmatchTicket"
},
"description": "Tickets that meet all the filtering criteria requested by the pool."
"description": "Tickets that satisfy all the filtering criteria."
}
}
},
@ -283,11 +217,6 @@
"$ref": "#/definitions/protobufAny"
},
"description": "Customized information not inspected by Open Match, to be used by the match\nmaking function, evaluator, and components making calls to Open Match.\nOptional, depending on the requirements of the connected systems."
},
"create_time": {
"type": "string",
"format": "date-time",
"description": "Create time represents the time at which this Ticket was created. It is\npopulated by Open Match at the time of Ticket creation."
}
},
"description": "A Ticket is a basic matchmaking entity in Open Match. A Ticket represents either an\nindividual 'Player' or a 'Group' of players. Open Match will not interpret\nwhat the Ticket represents but just treat it as a matchmaking unit with a set\nof SearchFields. Open Match stores the Ticket in state storage and enables an\nAssignment to be associated with this Ticket."
@ -334,18 +263,6 @@
}
},
"x-stream-definitions": {
"openmatchQueryTicketIdsResponse": {
"type": "object",
"properties": {
"result": {
"$ref": "#/definitions/openmatchQueryTicketIdsResponse"
},
"error": {
"$ref": "#/definitions/runtimeStreamError"
}
},
"title": "Stream result of openmatchQueryTicketIdsResponse"
},
"openmatchQueryTicketsResponse": {
"type": "object",
"properties": {

@ -164,7 +164,7 @@ artifacts:
- install/yaml/06-open-match-override-configmap.yaml
substitutions:
_OM_VERSION: "0.10.0"
_OM_VERSION: "0.9.0"
_GCB_POST_SUBMIT: "0"
_GCB_LATEST_VERSION: "undefined"
logsBucket: 'gs://open-match-build-logs/'

@ -102,7 +102,7 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
if err != nil {
panic(err)
}
ticketId = resp.Id
ticketId = resp.Ticket.Id
}
//////////////////////////////////////////////////////////////////////////////
@ -111,11 +111,11 @@ func runScenario(ctx context.Context, name string, update updater.SetFunc) {
var assignment *pb.Assignment
{
req := &pb.WatchAssignmentsRequest{
req := &pb.GetAssignmentsRequest{
TicketId: ticketId,
}
stream, err := fe.WatchAssignments(ctx, req)
stream, err := fe.GetAssignments(ctx, req)
for assignment.GetConnection() == "" {
resp, err := stream.Recv()
if err != nil {

@ -131,13 +131,9 @@ func run(ds *components.DemoShared) {
}
req := &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
},
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
}

@ -1,20 +0,0 @@
## How to use this framework
This is the framework we use to benchmark Open Match against different matchmaking scenarios. As of now (02/24/2020), the framework supports a Battle Royale, a basic 1v1 matchmaking, and a Team Shooter scenario. You are welcome to write your own `Scenario`, test it, and share the numbers you get with us.
1. The `Scenario` struct in the `scenarios/scenarios.go` file defines the parameters that this framework currently supports or plans to support.
2. Each subpackage (`battleroyal`, `firstmatch`, and `teamshooter`) implements the `GameScenario` interface defined in the `scenarios/scenarios.go` file. Feel free to write your own benchmark scenario by implementing the interface, as sketched below.
- Ticket `func() *pb.Ticket` - Tickets generator
- Profiles `func() []*pb.MatchProfile` - Profiles generator
- MMF `MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)` - Custom matchmaking logic using a MatchProfile and a map struct that contains the mapping from pool name to the tickets of that pool.
- Evaluate `Evaluate(stream pb.Evaluator_EvaluateServer) error` - Custom logic implementation of the evaluator.
Follow the instructions below if you want to use any of the existing benchmarking scenarios.
1. Open the `scenarios.go` file under the scenarios directory.
2. Change the value of the `ActiveScenario` variable to the scenario that you would like Open Match to run against.
3. Make sure you have `kubectl` connected to an existing Kubernetes cluster and run `make push-images` followed by `make install-scale-chart` to push the images and install Open Match core along with the scale components in the cluster.
4. Run `make proxy`
- Open `localhost:3000` to see the Grafana dashboards.
- Open `localhost:9090` to see the Prometheus query server.
- Open `localhost:[COMPONENT_HTTP_ENDPOINT]/help` to see how to access the zpages.
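For reference, below is a minimal sketch of a custom scenario written against the `GameScenario` interface this README describes. The `pb` types, the method set, and the FIFO evaluation idea come from code elsewhere in this comparison; the package name, `MyScenario`, and the pairing logic are illustrative assumptions, not repository code.

```go
// Package myscenario is a hypothetical example; only the pb types and the
// GameScenario method set below come from the open-match repository.
package myscenario

import (
	"fmt"
	"io"

	"open-match.dev/open-match/pkg/pb"
)

// MyScenario satisfies the GameScenario interface from scenarios/scenarios.go:
// Ticket, Profiles, MatchFunction, and Evaluate.
type MyScenario struct{}

// Ticket creates a new (here empty) ticket, mirroring the basic 1v1 scenario.
func (MyScenario) Ticket() *pb.Ticket { return &pb.Ticket{} }

// Profiles lists the profiles to run: a single profile with one unfiltered pool.
func (MyScenario) Profiles() []*pb.MatchProfile {
	return []*pb.MatchProfile{{
		Name:  "entirePool",
		Pools: []*pb.Pool{{Name: "all"}},
	}}
}

// MatchFunction pairs tickets two at a time (illustrative 1v1 logic).
func (MyScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
	tickets := poolTickets["all"]
	var matches []*pb.Match
	for i := 0; i+2 <= len(tickets); i += 2 {
		matches = append(matches, &pb.Match{
			MatchId:       fmt.Sprintf("profile-%v-%v", p.GetName(), len(matches)),
			Tickets:       tickets[i : i+2],
			MatchProfile:  p.GetName(),
			MatchFunction: "pairs",
		})
	}
	return matches, nil
}

// Evaluate accepts proposals first come, first served: a match is rejected if
// any of its tickets already appeared in an accepted match (the same idea as
// fifoEvaluate elsewhere in this diff).
func (MyScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
	used := map[string]struct{}{}
	accepted := []string{}
outer:
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("error reading evaluator input stream: %w", err)
		}
		m := req.GetMatch()
		for _, t := range m.Tickets {
			if _, ok := used[t.Id]; ok {
				continue outer
			}
		}
		for _, t := range m.Tickets {
			used[t.Id] = struct{}{}
		}
		accepted = append(accepted, m.GetMatchId())
	}
	for _, id := range accepted {
		if err := stream.Send(&pb.EvaluateResponse{MatchId: id}); err != nil {
			return fmt.Errorf("error sending evaluator output stream: %w", err)
		}
	}
	return nil
}
```

In the refactor shown later in this comparison, the interface is replaced by package-level `Scenario` values (for example `battleRoyalScenario` and `firstMatchScenario`), and `ActiveScenario` is pointed at one of them directly.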

@ -38,6 +38,7 @@ var (
})
activeScenario = scenarios.ActiveScenario
statProcessor = scenarios.NewStatProcessor()
mIterations = telemetry.Counter("scale_backend_iterations", "fetch match iterations")
mFetchMatchCalls = telemetry.Counter("scale_backend_fetch_match_calls", "fetch match calls")
@ -75,6 +76,8 @@ func run(cfg config.View) {
defer feConn.Close()
fe := pb.NewFrontendServiceClient(feConn)
startTime := time.Now()
w := logger.Writer()
defer w.Close()
@ -91,6 +94,7 @@ func run(cfg config.View) {
for range time.Tick(time.Millisecond * 250) {
// Keep pulling matches from Open Match backend
profiles := activeScenario.Profiles()
statProcessor.SetStat("TotalProfiles", len(profiles))
var wg sync.WaitGroup
for _, p := range profiles {
@ -103,7 +107,9 @@ func run(cfg config.View) {
// Wait for all profiles to complete before proceeding.
wg.Wait()
statProcessor.SetStat("TimeElapsed", time.Since(startTime).String())
telemetry.RecordUnitMeasurement(context.Background(), mIterations)
statProcessor.Log(w)
}
}
@ -124,7 +130,7 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA
stream, err := be.FetchMatches(ctx, req)
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mFetchMatchErrors)
logger.WithError(err).Error("failed to get available stream client")
statProcessor.RecordError("failed to get available stream client", err)
return
}
@ -138,12 +144,13 @@ func runFetchMatches(be pb.BackendServiceClient, p *pb.MatchProfile, matchesForA
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mFetchMatchErrors)
logger.WithError(err).Error("failed to get matches from stream client")
statProcessor.RecordError("failed to get matches from stream client", err)
return
}
telemetry.RecordNUnitMeasurement(ctx, mSumTicketsReturned, int64(len(resp.GetMatch().Tickets)))
telemetry.RecordUnitMeasurement(ctx, mMatchesReturned)
statProcessor.IncrementStat("MatchCount", 1)
matchesForAssignment <- resp.GetMatch()
}
@ -160,22 +167,19 @@ func runAssignments(be pb.BackendServiceClient, matchesForAssignment <-chan *pb.
if activeScenario.BackendAssignsTickets {
_, err := be.AssignTickets(context.Background(), &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
},
TicketIds: ids,
Assignment: &pb.Assignment{
Connection: fmt.Sprintf("%d.%d.%d.%d:2222", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)),
},
})
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mMatchAssignsFailed)
logger.WithError(err).Error("failed to assign tickets")
statProcessor.RecordError("failed to assign tickets", err)
continue
}
telemetry.RecordUnitMeasurement(ctx, mMatchesAssigned)
statProcessor.IncrementStat("Assigned", len(ids))
}
for _, id := range ids {
@ -197,9 +201,10 @@ func runDeletions(fe pb.FrontendServiceClient, ticketsForDeletion <-chan string)
if err == nil {
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
statProcessor.IncrementStat("Deleted", 1)
} else {
telemetry.RecordUnitMeasurement(ctx, mTicketDeletesFailed)
logger.WithError(err).Error("failed to delete tickets")
statProcessor.RecordError("failed to delete tickets", err)
}
}
}

@ -16,12 +16,11 @@ package frontend
import (
"context"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"open-match.dev/open-match/examples/scale/scenarios"
"open-match.dev/open-match/internal/config"
@ -35,12 +34,14 @@ var (
"app": "openmatch",
"component": "scale.frontend",
})
activeScenario = scenarios.ActiveScenario
activeScenario = scenarios.ActiveScenario
statProcessor = scenarios.NewStatProcessor()
numOfRoutineCreate = 8
totalCreated uint32
mTicketsCreated = telemetry.Counter("scale_frontend_tickets_created", "tickets created")
mTicketCreationsFailed = telemetry.Counter("scale_frontend_ticket_creations_failed", "tickets created")
mRunnersWaiting = concurrentGauge(telemetry.Gauge("scale_frontend_runners_waiting", "runners waiting"))
mRunnersCreating = concurrentGauge(telemetry.Gauge("scale_frontend_runners_creating", "runners creating"))
)
// Run triggers execution of the scale frontend component that creates
@ -60,92 +61,75 @@ func run(cfg config.View) {
}
fe := pb.NewFrontendServiceClient(conn)
w := logger.Writer()
defer w.Close()
ticketQPS := int(activeScenario.FrontendTicketCreatedQPS)
ticketTotal := activeScenario.FrontendTotalTicketsToCreate
totalCreated := 0
for {
currentCreated := int(atomic.LoadUint32(&totalCreated))
if ticketTotal != -1 && currentCreated >= ticketTotal {
break
}
for range time.Tick(time.Second) {
for i := 0; i < ticketQPS; i++ {
if ticketTotal == -1 || totalCreated < ticketTotal {
go runner(fe)
// Each inner loop creates TicketCreatedQPS tickets
var ticketPerRoutine, ticketModRoutine int
start := time.Now()
if ticketTotal == -1 || currentCreated+ticketQPS <= ticketTotal {
ticketPerRoutine = ticketQPS / numOfRoutineCreate
ticketModRoutine = ticketQPS % numOfRoutineCreate
} else {
ticketPerRoutine = (ticketTotal - currentCreated) / numOfRoutineCreate
ticketModRoutine = (ticketTotal - currentCreated) % numOfRoutineCreate
}
var wg sync.WaitGroup
for i := 0; i < numOfRoutineCreate; i++ {
wg.Add(1)
if i < ticketModRoutine {
go createPerCycle(&wg, fe, ticketPerRoutine+1, start)
} else {
go createPerCycle(&wg, fe, ticketPerRoutine, start)
}
}
// Wait for all concurrent creates to complete.
wg.Wait()
statProcessor.SetStat("TotalCreated", atomic.LoadUint32(&totalCreated))
statProcessor.Log(w)
}
}
func runner(fe pb.FrontendServiceClient) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
func createPerCycle(wg *sync.WaitGroup, fe pb.FrontendServiceClient, ticketPerRoutine int, start time.Time) {
defer wg.Done()
cycleCreated := 0
g := stateGauge{}
defer g.stop()
for j := 0; j < ticketPerRoutine; j++ {
req := &pb.CreateTicketRequest{
Ticket: activeScenario.Ticket(),
}
g.start(mRunnersWaiting)
// A random sleep at the start of the worker evens calls out over the second
// period, and makes timing between ticket creation calls a more realistic
// poisson distribution.
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
ctx, span := trace.StartSpan(context.Background(), "scale.frontend/CreateTicket")
defer span.End()
g.start(mRunnersCreating)
id, err := createTicket(ctx, fe)
if err != nil {
logger.WithError(err).Error("failed to create a ticket")
return
timeLeft := start.Add(time.Second).Sub(time.Now())
if timeLeft <= 0 {
break
}
ticketsLeft := ticketPerRoutine - cycleCreated
time.Sleep(timeLeft / time.Duration(ticketsLeft))
if _, err := fe.CreateTicket(ctx, req); err == nil {
cycleCreated++
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
} else {
statProcessor.RecordError("failed to create a ticket", err)
telemetry.RecordUnitMeasurement(ctx, mTicketCreationsFailed)
}
}
_ = id
}
func createTicket(ctx context.Context, fe pb.FrontendServiceClient) (string, error) {
ctx, span := trace.StartSpan(ctx, "scale.frontend/CreateTicket")
defer span.End()
req := &pb.CreateTicketRequest{
Ticket: activeScenario.Ticket(),
}
resp, err := fe.CreateTicket(ctx, req)
if err != nil {
telemetry.RecordUnitMeasurement(ctx, mTicketCreationsFailed)
return "", err
}
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
return resp.Id, nil
}
// Allows concurrent moficiation of a gauge value by modifying the concurrent
// value with a delta.
func concurrentGauge(s *stats.Int64Measure) func(delta int64) {
m := sync.Mutex{}
v := int64(0)
return func(delta int64) {
m.Lock()
defer m.Unlock()
v += delta
telemetry.SetGauge(context.Background(), s, v)
}
}
// stateGauge will have a single value be applied to one gauge at a time.
type stateGauge struct {
f func(int64)
}
// start begins a stage measured in a gauge, stopping any previously started
// stage.
func (g *stateGauge) start(f func(int64)) {
g.stop()
g.f = f
f(1)
}
// stop finishes the current stage by decrementing the gauge.
func (g *stateGauge) stop() {
if g.f != nil {
g.f(-1)
g.f = nil
}
atomic.AddUint32(&totalCreated, uint32(cycleCreated))
}

@ -0,0 +1,85 @@
package scenarios
import (
"fmt"
"math/rand"
"time"
"open-match.dev/open-match/pkg/pb"
)
const (
battleRoyalRegions = 20
regionArg = "region"
)
var (
battleRoyalScenario = &Scenario{
MMF: queryPoolsWrapper(battleRoyalMmf),
Evaluator: fifoEvaluate,
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
Ticket: battleRoyalTicket,
Profiles: battleRoyalProfile,
}
)
func battleRoyalProfile() []*pb.MatchProfile {
p := []*pb.MatchProfile{}
for i := 0; i < battleRoyalRegions; i++ {
p = append(p, &pb.MatchProfile{
Name: battleRoyalRegionName(i),
Pools: []*pb.Pool{
{
Name: poolName,
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: regionArg,
Value: battleRoyalRegionName(i),
},
},
},
},
})
}
return p
}
func battleRoyalTicket() *pb.Ticket {
// Simple way to give an uneven distribution of region population.
a := rand.Intn(battleRoyalRegions) + 1
r := rand.Intn(a)
return &pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
regionArg: battleRoyalRegionName(r),
},
},
}
}
func battleRoyalMmf(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
const playersInMatch = 100
tickets := poolTickets[poolName]
var matches []*pb.Match
for i := 0; i+playersInMatch <= len(tickets); i += playersInMatch {
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[i : i+playersInMatch],
MatchProfile: p.GetName(),
MatchFunction: "battleRoyal",
})
}
return matches, nil
}
func battleRoyalRegionName(i int) string {
return fmt.Sprintf("region_%d", i)
}

@ -1,141 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package battleroyal
import (
"fmt"
"io"
"math/rand"
"time"
"open-match.dev/open-match/pkg/pb"
)
const (
poolName = "all"
regionArg = "region"
)
func battleRoyalRegionName(i int) string {
return fmt.Sprintf("region_%d", i)
}
func Scenario() *BattleRoyalScenario {
return &BattleRoyalScenario{
regions: 20,
}
}
type BattleRoyalScenario struct {
regions int
}
func (b *BattleRoyalScenario) Profiles() []*pb.MatchProfile {
p := []*pb.MatchProfile{}
for i := 0; i < b.regions; i++ {
p = append(p, &pb.MatchProfile{
Name: battleRoyalRegionName(i),
Pools: []*pb.Pool{
{
Name: poolName,
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: regionArg,
Value: battleRoyalRegionName(i),
},
},
},
},
})
}
return p
}
func (b *BattleRoyalScenario) Ticket() *pb.Ticket {
// Simple way to give an uneven distribution of region population.
a := rand.Intn(b.regions) + 1
r := rand.Intn(a)
return &pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
regionArg: battleRoyalRegionName(r),
},
},
}
}
func (b *BattleRoyalScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
const playersInMatch = 100
tickets := poolTickets[poolName]
var matches []*pb.Match
for i := 0; i+playersInMatch <= len(tickets); i += playersInMatch {
matches = append(matches, &pb.Match{
MatchId: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
Tickets: tickets[i : i+playersInMatch],
MatchProfile: p.GetName(),
MatchFunction: "battleRoyal",
})
}
return matches, nil
}
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
// previously accepted match. Essentially first to claim the ticket wins.
func (b *BattleRoyalScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and recieving at the
// same time, don't buffer, just send results immediately.
matchIDs := []string{}
outer:
for {
req, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("Error reading evaluator input stream: %w", err)
}
m := req.GetMatch()
for _, t := range m.Tickets {
if _, ok := used[t.Id]; ok {
continue outer
}
}
for _, t := range m.Tickets {
used[t.Id] = struct{}{}
}
matchIDs = append(matchIDs, m.GetMatchId())
}
for _, mID := range matchIDs {
err := stream.Send(&pb.EvaluateResponse{MatchId: mID})
if err != nil {
return fmt.Errorf("Error sending evaluator output stream: %w", err)
}
}
return nil
}

@ -1,18 +1,4 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package firstmatch
package scenarios
import (
"fmt"
@ -26,14 +12,20 @@ const (
poolName = "all"
)
func Scenario() *FirstMatchScenario {
return &FirstMatchScenario{}
}
var (
firstMatchScenario = &Scenario{
MMF: queryPoolsWrapper(firstMatchMmf),
Evaluator: fifoEvaluate,
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
Ticket: firstMatchTicket,
Profiles: firstMatchProfile,
}
)
type FirstMatchScenario struct {
}
func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
func firstMatchProfile() []*pb.MatchProfile {
return []*pb.MatchProfile{
{
Name: "entirePool",
@ -46,11 +38,11 @@ func (_ *FirstMatchScenario) Profiles() []*pb.MatchProfile {
}
}
func (_ *FirstMatchScenario) Ticket() *pb.Ticket {
func firstMatchTicket() *pb.Ticket {
return &pb.Ticket{}
}
func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
func firstMatchMmf(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
tickets := poolTickets[poolName]
var matches []*pb.Match
@ -68,7 +60,7 @@ func (_ *FirstMatchScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[s
// fifoEvaluate accepts all matches which don't contain the same ticket as in a
// previously accepted match. Essentially first to claim the ticket wins.
func (_ *FirstMatchScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
func fifoEvaluate(stream pb.Evaluator_EvaluateServer) error {
used := map[string]struct{}{}
// TODO: once the evaluator client supports sending and recieving at the

@ -14,65 +14,10 @@
package scenarios
import (
"sync"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"open-match.dev/open-match/examples/scale/scenarios/battleroyal"
"open-match.dev/open-match/examples/scale/scenarios/firstmatch"
"open-match.dev/open-match/examples/scale/scenarios/teamshooter"
"open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
var (
queryServiceAddress = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
logger = logrus.WithFields(logrus.Fields{
"app": "scale",
})
)
// GameScenario defines what tickets look like, and how they should be matched.
type GameScenario interface {
// Ticket creates a new ticket, with randomized parameters.
Ticket() *pb.Ticket
// Profiles lists all of the profiles that should run.
Profiles() []*pb.MatchProfile
// MatchFunction is the custom logic implementation of the match function.
MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error)
// Evaluate is the custom logic implementation of the evaluator.
Evaluate(stream pb.Evaluator_EvaluateServer) error
}
import "open-match.dev/open-match/pkg/pb"
// ActiveScenario sets the scenario with preset parameters that we want to use for current Open Match benchmark run.
var ActiveScenario = func() *Scenario {
var gs GameScenario = firstmatch.Scenario()
// TODO: Select which scenario to use based on some configuration or choice,
// so it's easier to run different scenarios without changing code.
gs = battleroyal.Scenario()
gs = teamshooter.Scenario()
return &Scenario{
FrontendTotalTicketsToCreate: -1,
FrontendTicketCreatedQPS: 100,
BackendAssignsTickets: true,
BackendDeletesTickets: true,
Ticket: gs.Ticket,
Profiles: gs.Profiles,
MMF: queryPoolsWrapper(gs.MatchFunction),
Evaluator: gs.Evaluate,
}
}()
var ActiveScenario = battleRoyalScenario
// Scenario defines the controllable fields for Open Match benchmark scenarios
type Scenario struct {
@ -113,44 +58,3 @@ func (mmf matchFunction) Run(req *pb.RunRequest, srv pb.MatchFunction_RunServer)
func (eval evaluatorFunction) Evaluate(srv pb.Evaluator_EvaluateServer) error {
return eval(srv)
}
func getQueryServiceGRPCClient() pb.QueryServiceClient {
conn, err := grpc.Dial(queryServiceAddress, testing.NewGRPCDialOptions(logger)...)
if err != nil {
logger.Fatalf("Failed to connect to Open Match, got %v", err)
}
return pb.NewQueryServiceClient(conn)
}
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
var q pb.QueryServiceClient
var startQ sync.Once
return func(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
startQ.Do(func() {
q = getQueryServiceGRPCClient()
})
poolTickets, err := matchfunction.QueryPools(stream.Context(), q, req.GetProfile().GetPools())
if err != nil {
return err
}
proposals, err := mmf(req.GetProfile(), poolTickets)
if err != nil {
return err
}
logger.WithFields(logrus.Fields{
"proposals": proposals,
}).Trace("proposals returned by match function")
for _, proposal := range proposals {
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
return err
}
}
return nil
}
}

@ -1,330 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TeamShooterScenario is a scenario which is designed to emulate the
// approximate behavior to open match that a skill based team game would have.
// It doesn't try to provide good matchmaking for real players. There are three
// arguments used:
// mode: The game mode the players wants to play in. mode is a hard partition.
// regions: Players may have good latency to one or more regions. A player will
// search for matches in all eligible regions.
// skill: Players have a random skill based on a normal distribution. Players
// will only be matched with other players who have a close skill value. The
// match functions have overlapping partitions of the skill brackets.
package teamshooter
import (
"fmt"
"io"
"math"
"math/rand"
"sort"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/golang/protobuf/ptypes/wrappers"
"open-match.dev/open-match/pkg/pb"
)
const (
poolName = "all"
skillArg = "skill"
modeArg = "mode"
)
// TeamShooterScenario provides the required methods for running a scenario.
type TeamShooterScenario struct {
// Names of available region tags.
regions []string
// Maximum regions a player can search in.
maxRegions int
// Number of tickets which form a match.
playersPerGame int
// For each pair of consequitive values, the value to split profiles on by
// skill.
skillBoundaries []float64
// Maximum difference between two tickets to consider a match valid.
maxSkillDifference float64
// List of mode names.
modes []string
// Returns a random mode, with some weight.
randomMode func() string
}
// Scenario creates a new TeamShooterScenario.
func Scenario() *TeamShooterScenario {
modes, randomMode := weightedChoice(map[string]int{
"pl": 100, // Payload, very popular.
"cp": 25, // Capture point, 1/4 as popular.
})
regions := []string{}
for i := 0; i < 2; i++ {
regions = append(regions, fmt.Sprintf("region_%d", i))
}
return &TeamShooterScenario{
regions: regions,
maxRegions: 1,
playersPerGame: 12,
skillBoundaries: []float64{math.Inf(-1), 0, math.Inf(1)},
maxSkillDifference: 0.01,
modes: modes,
randomMode: randomMode,
}
}
// Profiles shards the player base on mode, region, and skill.
func (t *TeamShooterScenario) Profiles() []*pb.MatchProfile {
p := []*pb.MatchProfile{}
for _, region := range t.regions {
for _, mode := range t.modes {
for i := 0; i+1 < len(t.skillBoundaries); i++ {
skillMin := t.skillBoundaries[i] - t.maxSkillDifference/2
skillMax := t.skillBoundaries[i+1] + t.maxSkillDifference/2
p = append(p, &pb.MatchProfile{
Name: fmt.Sprintf("%s_%s_%v-%v", region, mode, skillMin, skillMax),
Pools: []*pb.Pool{
{
Name: poolName,
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: skillArg,
Min: skillMin,
Max: skillMax,
},
},
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: region,
},
},
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: modeArg,
Value: mode,
},
},
},
},
})
}
}
}
return p
}
// Ticket creates a randomized player.
func (t *TeamShooterScenario) Ticket() *pb.Ticket {
region := rand.Intn(len(t.regions))
numRegions := rand.Intn(t.maxRegions) + 1
tags := []string{}
for i := 0; i < numRegions; i++ {
tags = append(tags, t.regions[region])
// The Earth is actually a circle.
region = (region + 1) % len(t.regions)
}
return &pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
skillArg: clamp(rand.NormFloat64(), -3, 3),
},
StringArgs: map[string]string{
modeArg: t.randomMode(),
},
Tags: tags,
},
}
}
// MatchFunction puts tickets into matches based on their skill, finding the
// required number of tickets for a game within the maximum skill difference.
func (t *TeamShooterScenario) MatchFunction(p *pb.MatchProfile, poolTickets map[string][]*pb.Ticket) ([]*pb.Match, error) {
skill := func(t *pb.Ticket) float64 {
return t.SearchFields.DoubleArgs[skillArg]
}
tickets := poolTickets[poolName]
var matches []*pb.Match
sort.Slice(tickets, func(i, j int) bool {
return skill(tickets[i]) < skill(tickets[j])
})
for i := 0; i+t.playersPerGame <= len(tickets); i++ {
mt := tickets[i : i+t.playersPerGame]
if skill(mt[len(mt)-1])-skill(mt[0]) < t.maxSkillDifference {
avg := float64(0)
for _, t := range mt {
avg += skill(t)
}
avg /= float64(len(mt))
q := float64(0)
for _, t := range mt {
diff := skill(t) - avg
q -= diff * diff
}
m, err := (&matchExt{
id: fmt.Sprintf("profile-%v-time-%v-%v", p.GetName(), time.Now().Format("2006-01-02T15:04:05.00"), len(matches)),
matchProfile: p.GetName(),
matchFunction: "skillmatcher",
tickets: mt,
quality: q,
}).pack()
if err != nil {
return nil, err
}
matches = append(matches, m)
}
}
return matches, nil
}
// Evaluate returns matches in order of highest quality, skipping any matches
// which contain tickets that are already used.
func (t *TeamShooterScenario) Evaluate(stream pb.Evaluator_EvaluateServer) error {
// Unpacked proposal matches.
proposals := []*matchExt{}
// Ticket ids which are used in a match.
used := map[string]struct{}{}
for {
req, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("Error reading evaluator input stream: %w", err)
}
p, err := unpackMatch(req.GetMatch())
if err != nil {
return err
}
proposals = append(proposals, p)
}
// Higher quality is better.
sort.Slice(proposals, func(i, j int) bool {
return proposals[i].quality > proposals[j].quality
})
outer:
for _, p := range proposals {
for _, t := range p.tickets {
if _, ok := used[t.Id]; ok {
continue outer
}
}
for _, t := range p.tickets {
used[t.Id] = struct{}{}
}
err := stream.Send(&pb.EvaluateResponse{MatchId: p.id})
if err != nil {
return fmt.Errorf("Error sending evaluator output stream: %w", err)
}
}
return nil
}
// matchExt presents the match and extension data in a native form, and allows
// easy conversion to and from proto format.
type matchExt struct {
id string
tickets []*pb.Ticket
quality float64
matchProfile string
matchFunction string
}
func unpackMatch(m *pb.Match) (*matchExt, error) {
v := &wrappers.DoubleValue{}
err := ptypes.UnmarshalAny(m.Extensions["quality"], v)
if err != nil {
return nil, fmt.Errorf("Error unpacking match quality: %w", err)
}
return &matchExt{
id: m.MatchId,
tickets: m.Tickets,
quality: v.Value,
matchProfile: m.MatchProfile,
matchFunction: m.MatchFunction,
}, nil
}
func (m *matchExt) pack() (*pb.Match, error) {
v := &wrappers.DoubleValue{Value: m.quality}
a, err := ptypes.MarshalAny(v)
if err != nil {
return nil, fmt.Errorf("Error packing match quality: %w", err)
}
return &pb.Match{
MatchId: m.id,
Tickets: m.tickets,
MatchProfile: m.matchProfile,
MatchFunction: m.matchFunction,
Extensions: map[string]*any.Any{
"quality": a,
},
}, nil
}
func clamp(v float64, min float64, max float64) float64 {
if v < min {
return min
}
if v > max {
return max
}
return v
}
// weightedChoice takes a map of values, and their relative probability. It
// returns a list of the values, along with a function which will return random
// choices from the values with the weighted probability.
func weightedChoice(m map[string]int) ([]string, func() string) {
s := make([]string, 0, len(m))
total := 0
for k, v := range m {
s = append(s, k)
total += v
}
return s, func() string {
remainder := rand.Intn(total)
for k, v := range m {
remainder -= v
if remainder < 0 {
return k
}
}
panic("weightedChoice is broken.")
}
}
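A usage sketch for weightedChoice (weights invented for illustration): the returned closure draws keys in proportion to their weights, while the slice keeps the full key set available:
modes, randomMode := weightedChoice(map[string]int{
    "pistol":  1, // ~10% of draws
    "shotgun": 3, // ~30% of draws
    "rifle":   6, // ~60% of draws
})
fmt.Println(modes)        // all keys, order not guaranteed
fmt.Println(randomMode()) // one key, picked with the weights above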

@ -0,0 +1,137 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scenarios
import (
"fmt"
"io"
"sync"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/matchfunction"
"open-match.dev/open-match/pkg/pb"
)
var (
queryServiceAddress = "om-query.open-match.svc.cluster.local:50503" // Address of the QueryService Endpoint.
logger = logrus.WithFields(logrus.Fields{
"app": "scale",
})
)
// StatProcessor uses sync.Maps to store the stress test metrics and the occurrence of errors.
// It can write out the data to an input io.Writer.
type StatProcessor struct {
em *sync.Map
sm *sync.Map
}
// NewStatProcessor returns an initialized StatProcessor
func NewStatProcessor() *StatProcessor {
return &StatProcessor{
em: &sync.Map{},
sm: &sync.Map{},
}
}
// SetStat sets the value for a key
func (e StatProcessor) SetStat(k string, v interface{}) {
e.sm.Store(k, v)
}
// IncrementStat atomically increments the value of a key by delta
func (e StatProcessor) IncrementStat(k string, delta interface{}) {
statRead, ok := e.sm.Load(k)
if !ok {
statRead = 0
}
switch delta.(type) {
case int:
e.sm.Store(k, statRead.(int)+delta.(int))
case float32:
e.sm.Store(k, statRead.(float32)+delta.(float32))
case float64:
e.sm.Store(k, statRead.(float64)+delta.(float64))
default:
logger.Errorf("IncrementStat: type %T not supported", delta)
}
}
// RecordError atomically records the occurrence of input errors
func (e StatProcessor) RecordError(desc string, err error) {
errMsg := fmt.Sprintf("%s: %s", desc, err.Error())
errRead, ok := e.em.Load(errMsg)
if !ok {
errRead = 0
}
e.em.Store(errMsg, errRead.(int)+1)
}
// Log writes the formatted errors and metrics to the input writer
func (e StatProcessor) Log(w io.Writer) {
e.sm.Range(func(k interface{}, v interface{}) bool {
w.Write([]byte(fmt.Sprintf("%s: %d \n", k, v)))
return true
})
e.em.Range(func(k interface{}, v interface{}) bool {
w.Write([]byte(fmt.Sprintf("%s: %d \n", k, v)))
return true
})
}
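A hypothetical caller, assuming "os" is also imported, would collect stats during a run and dump them at the end:
sp := NewStatProcessor()
sp.SetStat("TicketsCreated", 100)
sp.IncrementStat("TicketsCreated", 20) // TicketsCreated is now 120
sp.RecordError("create ticket", fmt.Errorf("deadline exceeded"))
sp.Log(os.Stdout) // writes each stat and each error count on its own line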
func getQueryServiceGRPCClient() pb.QueryServiceClient {
conn, err := grpc.Dial(queryServiceAddress, testing.NewGRPCDialOptions(logger)...)
if err != nil {
logger.Fatalf("Failed to connect to Open Match, got %v", err)
}
return pb.NewQueryServiceClient(conn)
}
func queryPoolsWrapper(mmf func(req *pb.MatchProfile, pools map[string][]*pb.Ticket) ([]*pb.Match, error)) matchFunction {
var q pb.QueryServiceClient
var startQ sync.Once
return func(req *pb.RunRequest, stream pb.MatchFunction_RunServer) error {
startQ.Do(func() {
q = getQueryServiceGRPCClient()
})
poolTickets, err := matchfunction.QueryPools(stream.Context(), q, req.GetProfile().GetPools())
if err != nil {
return err
}
proposals, err := mmf(req.GetProfile(), poolTickets)
if err != nil {
return err
}
logger.WithFields(logrus.Fields{
"proposals": proposals,
}).Trace("proposals returned by match function")
for _, proposal := range proposals {
if err := stream.Send(&pb.RunResponse{Proposal: proposal}); err != nil {
return err
}
}
return nil
}
}

9
go.mod

@ -15,7 +15,7 @@ module open-match.dev/open-match
// limitations under the License.
// When updating Go version, update Dockerfile.ci, Dockerfile.base-build, and go.mod
go 1.14
go 1.13
require (
cloud.google.com/go v0.47.0 // indirect
@ -23,9 +23,8 @@ require (
contrib.go.opencensus.io/exporter/ocagent v0.6.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0
contrib.go.opencensus.io/exporter/stackdriver v0.12.8
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71
github.com/TV4/logrus-stackdriver-formatter v0.1.0
github.com/alicebob/miniredis/v2 v2.11.0
github.com/alicebob/miniredis/v2 v2.10.1
github.com/apache/thrift v0.13.0 // indirect
github.com/aws/aws-sdk-go v1.25.27 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible
@ -33,7 +32,7 @@ require (
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
github.com/golang/protobuf v1.3.2
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3
github.com/googleapis/gnostic v0.3.1 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
@ -52,10 +51,10 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.5.0
github.com/stretchr/testify v1.4.0
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 // indirect
go.opencensus.io v0.22.1
golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 // indirect
golang.org/x/net v0.0.0-20191105084925-a882066a44e0
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
google.golang.org/api v0.13.0 // indirect

20
go.sum

@ -20,13 +20,9 @@ contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZ
contrib.go.opencensus.io/exporter/stackdriver v0.12.8 h1:iXI5hr7pUwMx0IwMphpKz5Q3If/G5JiWFVZ5MPPxP9E=
contrib.go.opencensus.io/exporter/stackdriver v0.12.8/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71 h1:J52um+Sp3v8TpSY0wOgpjr84np+xvrY3503DRirJ6wI=
github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71/go.mod h1:E4OavwrrOME3uj3Zm9Rla8ZDqlAR5GqKA+mMIPoilYk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/FZambia/sentinel v1.0.0 h1:KJ0ryjKTZk5WMp0dXvSdNqp3lFaW1fNFuEYfrkLOYIc=
github.com/FZambia/sentinel v1.0.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
@ -38,8 +34,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.11.0 h1:Dz6uJ4w3Llb1ZiFoqyzF9aLuzbsEWCeKwstu9MzmSAk=
github.com/alicebob/miniredis/v2 v2.11.0/go.mod h1:UA48pmi7aSazcGAvcdKcBB49z521IC9VjTTRz2nIaJE=
github.com/alicebob/miniredis/v2 v2.10.1 h1:r+hpRUqYCcIsrjxH/wRLwQGmA2nkQf4IYj7MKPwbA+s=
github.com/alicebob/miniredis/v2 v2.10.1/go.mod h1:gUxwu+6dLLmJHIXOOBlgcXqbcpPPp+NzOnBzgqFIGYA=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
@ -111,10 +107,8 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3 h1:6amM4HsNPOvMLVc2ZnyqrjeQ92YAVWn7T4WBKK87inY=
github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible h1:1mCVU17Wc8oyVUlx1ZXpnWz1DNP6v0R5z5ElKCTvVrY=
github.com/gomodule/redigo v2.0.1-0.20191111085604-09d84710e01a+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -128,8 +122,6 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@ -178,8 +170,6 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
@ -273,8 +263,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/gopher-lua v0.0.0-20190206043414-8bfc7677f583/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c h1:RCby8AaF+weuP1M+nwMQ4uQYO2shgD6UFAKvnXszwTw=
github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036 h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=
github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=

@ -20,6 +20,28 @@ metadata:
app: open-match-demo
release: open-match-demo
---
apiVersion: v1
kind: ConfigMap
metadata:
name: customize-configmap
namespace: open-match-demo
labels:
app: open-match-customize
component: config
release: open-match-demo
data:
matchmaker_config_default.yaml: |-
api:
functions:
hostname: "om-function"
grpcport: 50502
httpport: 51502
matchmaker_config_override.yaml: |-
api:
query:
hostname: "om-query.open-match.svc.cluster.local"
grpcport: "50503"
---
kind: Service
apiVersion: v1
metadata:
@ -86,9 +108,21 @@ spec:
component: matchfunction
release: open-match-demo
spec:
volumes:
- name: customize-config-volume
configMap:
name: customize-configmap
- name: om-config-volume-default
configMap:
name: customize-configmap
containers:
- name: om-function
image: "gcr.io/open-match-public-images/openmatch-mmf-go-soloduel:0.10.0"
volumeMounts:
- name: customize-config-volume
mountPath: /app/config/override
- name: om-config-volume-default
mountPath: /app/config/default
image: "gcr.io/open-match-public-images/openmatch-mmf-go-soloduel:0.9.0"
ports:
- name: grpc
containerPort: 50502
@ -125,7 +159,7 @@ spec:
spec:
containers:
- name: om-demo
image: "gcr.io/open-match-public-images/openmatch-demo-first-match:0.10.0"
image: "gcr.io/open-match-public-images/openmatch-demo-first-match:0.9.0"
imagePullPolicy: Always
ports:
- name: http

@ -13,8 +13,8 @@
# limitations under the License.
apiVersion: v2
appVersion: "0.10.0"
version: 0.10.0
appVersion: "0.9.0"
version: 0.9.0
name: open-match
dependencies:
- name: redis

@ -26,7 +26,7 @@ evaluator:
enabled: false
replicas: 3
portType: ClusterIP
image: openmatch-default-evaluator
image: openmatch-evaluator-go-simple
evaluatorConfigs:
# We use harness to implement the MMFs. MMF itself only requires one configmap but harness expects two,

@ -320,100 +320,6 @@
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
"fill": 1,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 19
},
"id": 24,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(scale_frontend_runners_waiting)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Runners Waiting To Start",
"refId": "A"
},
{
"expr": "sum(scale_frontend_runners_creating)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
"legendFormat": "Runners Creating Ticket",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Outstanding Frontend Runners",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,

@ -0,0 +1,108 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: scale-configmap
namespace: {{ .Release.Namespace }}
annotations: {{- include "openmatch.chartmeta" . | nindent 4 }}
labels:
app: {{ template "openmatch.name" . }}
component: config
release: {{ .Release.Name }}
data:
matchmaker_config_default.yaml: |-
api:
backend:
hostname: "{{ .Values.backend.hostName }}"
grpcport: "{{ .Values.backend.grpcPort }}"
httpport: "{{ .Values.backend.httpPort }}"
frontend:
hostname: "{{ .Values.frontend.hostName }}"
grpcport: "{{ .Values.frontend.grpcPort }}"
httpport: "{{ .Values.frontend.httpPort }}"
scale:
httpport: "51509"
{{- if .Values.global.tls.enabled }}
tls:
trustedCertificatePath: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
certificatefile: "{{.Values.global.tls.server.mountPath}}/public.cert"
privatekey: "{{.Values.global.tls.server.mountPath}}/private.key"
rootcertificatefile: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
{{- end }}
logging:
level: debug
{{- if .Values.global.telemetry.stackdriverMetrics.enabled }}
format: stackdriver
{{- else }}
format: text
{{- end }}
rpc: {{ .Values.global.logging.rpc.enabled }}
# Open Match applies the exponential backoff strategy for its retryable gRPC calls.
# The settings below are the default backoff configuration used in Open Match.
# See https://github.com/cenkalti/backoff/blob/v3/exponential.go for detailed explanations
backoff:
# The initial retry interval (in milliseconds)
initialInterval: 100ms
# maxInterval caps the maximum time elapsed for a retry interval
maxInterval: 500ms
# The next retry interval is multiplied by this multiplier
multiplier: 1.5
# Randomize the retry interval
randFactor: 0.5
# maxElapsedTime caps the retry time (in milliseconds)
maxElapsedTime: 3000ms
telemetry:
zpages:
enable: "{{ .Values.global.telemetry.zpages.enabled }}"
jaeger:
enable: "{{ .Values.global.telemetry.jaeger.enabled }}"
samplerFraction: {{ .Values.global.telemetry.jaeger.samplerFraction }}
agentEndpoint: "{{ .Values.global.telemetry.jaeger.agentEndpoint }}"
collectorEndpoint: "{{ .Values.global.telemetry.jaeger.collectorEndpoint }}"
prometheus:
enable: "{{ .Values.global.telemetry.prometheus.enabled }}"
endpoint: "{{ .Values.global.telemetry.prometheus.endpoint }}"
serviceDiscovery: "{{ .Values.global.telemetry.prometheus.serviceDiscovery }}"
stackdriverMetrics:
enable: "{{ .Values.global.telemetry.stackdriverMetrics.enabled }}"
gcpProjectId: "{{ .Values.global.gcpProjectId }}"
prefix: "{{ .Values.global.telemetry.stackdriverMetrics.prefix }}"
reportingPeriod: "{{ .Values.global.telemetry.reportingPeriod }}"
matchmaker_config_override.yaml: |-
testConfig:
profile: "{{ .Values.testConfig.profile }}"
regions:
{{- range .Values.testConfig.regions }}
- {{ . }}
{{- end }}
characters:
{{- range .Values.testConfig.characters }}
- {{ . }}
{{- end }}
minRating: "{{ .Values.testConfig.minRating }}"
maxRating: "{{ .Values.testConfig.maxRating }}"
ticketsPerMatch: "{{ .Values.testConfig.ticketsPerMatch }}"
multifilter:
rangeSize: "{{ .Values.testConfig.multifilter.rangeSize }}"
rangeOverlap: "{{ .Values.testConfig.multifilter.rangeOverlap }}"
multipool:
rangeSize: "{{ .Values.testConfig.multipool.rangeSize }}"
rangeOverlap: "{{ .Values.testConfig.multipool.rangeOverlap }}"
characterCount: "{{ .Values.testConfig.multipool.characterCount }}"
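The backoff settings above correspond to the fields of an ExponentialBackOff in github.com/cenkalti/backoff (the version pinned in go.mod earlier in this diff). A hedged Go sketch of applying the same values to a retryable call, assuming imports of that package, "log", and "time":
b := backoff.NewExponentialBackOff()
b.InitialInterval = 100 * time.Millisecond // initialInterval
b.MaxInterval = 500 * time.Millisecond     // maxInterval
b.Multiplier = 1.5                         // multiplier
b.RandomizationFactor = 0.5                // randFactor
b.MaxElapsedTime = 3 * time.Second         // maxElapsedTime
// Retry until the operation succeeds or maxElapsedTime is exhausted.
if err := backoff.Retry(func() error {
    // a retryable gRPC call would go here
    return nil
}, b); err != nil {
    log.Printf("gave up after maxElapsedTime: %v", err)
}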

@ -28,8 +28,29 @@ configs:
default:
volumeName: om-config-volume-default
mountPath: /app/config/default
configName: om-configmap-default
override:
volumeName: om-config-volume-override
configName: scale-configmap
scale-configmap:
volumeName: scale-config-volume
mountPath: /app/config/override
configName: om-configmap-override
configName: scale-configmap
testConfig:
profile: scaleprofiles
regions:
- region.europe-west1
- region.europe-west2
- region.europe-west3
- region.europe-west4
characters:
- cleric
- knight
minRating: 0
maxRating: 100
ticketsPerMatch: 8
multifilter:
rangeSize: 10
rangeOverlap: 5
multipool:
rangeSize: 10
rangeOverlap: 5
characterCount: 4

@ -15,8 +15,8 @@
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 2,
"iteration": 1580944984710,
"id": 3,
"iteration": 1580245993833,
"links": [],
"panels": [
{
@ -415,7 +415,7 @@
},
"id": 57,
"panels": [],
"title": "openmatch.QueryService/QueryTickets",
"title": "openmatch.Mmlogic/QueryTickets",
"type": "row"
},
{
@ -812,7 +812,7 @@
},
"id": 29,
"panels": [],
"title": "openmatch.BackendService/AssignTickets",
"title": "openmatch.Backend/AssignTickets",
"type": "row"
},
{
@ -1210,7 +1210,7 @@
"id": 31,
"panels": [],
"repeat": null,
"title": "openmatch.FrontendService/CreateTicket",
"title": "openmatch.Frontend/CreateTicket",
"type": "row"
},
{
@ -2399,7 +2399,7 @@
},
"id": 42,
"panels": [],
"title": "openmatch.BackendService/FetchMatches",
"title": "openmatch.Frontend/FetchMatches",
"type": "row"
},
{
@ -3191,7 +3191,7 @@
},
"id": 23,
"panels": [],
"title": "openmatch.FrontendService/DeleteTicket",
"title": "openmatch.Frontend/DeleteTicket",
"type": "row"
},
{

@ -16,8 +16,8 @@
"editable": true,
"gnetId": 763,
"graphTooltip": 0,
"id": 6,
"iteration": 1580946687856,
"id": 2,
"iteration": 1579655194536,
"links": [],
"panels": [
{
@ -296,7 +296,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
"fill": 0,
"fill": 1,
"gridPos": {
"h": 7,
"w": 8,
@ -312,8 +312,6 @@
"min": false,
"rightSide": true,
"show": true,
"sort": "current",
"sortDesc": true,
"total": false,
"values": true
},
@ -327,54 +325,24 @@
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
"alias": "limit",
"color": "#C4162A",
"hideTooltip": true,
"legend": false,
"nullPointMode": "connected"
},
{
"alias": "request",
"color": "#73BF69",
"hideTooltip": true,
"legend": false,
"nullPointMode": "connected"
}
],
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{pod_name=~\"om-redis.*\", name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{pod_name=~\"om-redis.*\", name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}[5m])) by (pod_name, container_name) /\nsum(container_spec_cpu_quota{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}/container_spec_cpu_period{name!~\".*prometheus.*\", image!=\"\", container_name!=\"POD\"}) by (pod_name, container_name) * 100",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{pod_name}} usage",
"legendFormat": "{{pod_name}}",
"refId": "A"
},
{
"expr": "sum(kube_pod_container_resource_limits_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "limit",
"refId": "B"
},
{
"expr": "sum(kube_pod_container_resource_requests_cpu_cores{pod=~\"om-redis.*\"}) by (pod)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "request",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "CPU Usage",
"title": "CPU Usage Percentage of Limit",
"tooltip": {
"shared": true,
"sort": 0,
@ -392,7 +360,7 @@
"yaxes": [
{
"format": "short",
"label": "core",
"label": "%",
"logBase": 1,
"max": null,
"min": null,
@ -660,8 +628,6 @@
"min": false,
"rightSide": true,
"show": true,
"sort": "current",
"sortDesc": true,
"total": false,
"values": true
},
@ -689,13 +655,6 @@
"refId": "A",
"step": 240,
"target": ""
},
{
"expr": "sum by (kubernetes_pod_name) (rate(redis_commands_total[5m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "total - {{kubernetes_pod_name}}",
"refId": "B"
}
],
"thresholds": [],
@ -952,8 +911,8 @@
{
"allValue": null,
"current": {
"text": "10.28.0.12:9121",
"value": "10.28.0.12:9121"
"text": "10.28.0.27:9121",
"value": "10.28.0.27:9121"
},
"datasource": "Prometheus",
"definition": "label_values(redis_up, instance)",

@ -68,8 +68,10 @@ data:
swaggerui:
hostname: "{{ .Values.swaggerui.hostName }}"
httpport: "{{ .Values.swaggerui.httpPort }}"
scale:
scale-frontend:
httpport: "51509"
scale-backend:
httpport: "51510"
{{- if .Values.global.tls.enabled }}
tls:
trustedCertificatePath: "{{.Values.global.tls.rootca.mountPath}}/public.cert"
@ -84,31 +86,24 @@ data:
size: 10000
redis:
{{- if index .Values "open-match-core" "redis" "enabled" }}
{{- if index .Values "redis" "sentinel" "enabled"}}
sentinelPort: {{ .Values.redis.sentinel.port }}
sentinelMaster: {{ .Values.redis.sentinel.masterSet }}
sentinelHostname: {{ .Values.redis.fullnameOverride }}.{{ .Release.Namespace }}.svc.cluster.local
sentinelUsePassword: {{ .Values.redis.sentinel.usePassword }}
{{- else}}
# Open Match's default Redis setups
{{- if index .Values "open-match-core" "redis" "install" }}
hostname: {{ .Values.redis.fullnameOverride }}-master.{{ .Release.Namespace }}.svc.cluster.local
port: {{ .Values.redis.redisPort }}
user: {{ .Values.redis.user }}
{{- end}}
{{- else }}
# BYO Redis setups
hostname: {{ index .Values "open-match-core" "redis" "hostname" }}
port: {{ index .Values "open-match-core" "redis" "port" }}
user: {{ index .Values "open-match-core" "redis" "user" }}
{{- end }}
usePassword: {{ .Values.redis.usePassword }}
{{- if .Values.redis.usePassword }}
passwordPath: {{ .Values.redis.secretMountPath }}/redis-password
{{- end }}
pool:
maxIdle: {{ index .Values "open-match-core" "redis" "pool" "maxIdle" }}
maxActive: {{ index .Values "open-match-core" "redis" "pool" "maxActive" }}
idleTimeout: {{ index .Values "open-match-core" "redis" "pool" "idleTimeout" }}
healthCheckTimeout: {{ index .Values "open-match-core" "redis" "pool" "healthCheckTimeout" }}
expiration: 43200
telemetry:
zpages:

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
{{- if index .Values "open-match-core" "swaggerui" "enabled" }}
{{- if index .Values "open-match-core" "enabled" }}
kind: Service
apiVersion: v1
metadata:

@ -112,10 +112,6 @@ redis:
configmap: |
maxclients 100000
maxmemory 500000000
sentinel:
enabled: true
masterSet: om-redis-master
port: 26379
master:
disableCommands: [] # don't disable 'FLUSH-' commands
resources:
@ -174,8 +170,8 @@ open-match-core:
enabled: true
ignoreListTTL: 60000ms
redis:
enabled: true
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
install: true
# If open-match-core.redis.install is set to false, have Open Match components talk to this redis address instead.
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
@ -185,8 +181,6 @@ open-match-core:
maxActive: 500
idleTimeout: 0
healthCheckTimeout: 300ms
swaggerui:
enabled: false
# Controls if users need to install scale testing setup for Open Match.
open-match-scale:
@ -256,7 +250,7 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 0.10.0
tag: 0.9.0
pullPolicy: Always

@ -112,10 +112,6 @@ redis:
configmap: |
maxclients 100000
maxmemory 300000000
sentinel:
enabled: true
masterSet: om-redis-master
port: 26379
master:
disableCommands: [] # don't disable 'FLUSH-' commands
resources:
@ -159,8 +155,8 @@ open-match-core:
enabled: true
ignoreListTTL: 60000ms
redis:
enabled: true
# If open-match-core.redis.enabled is set to false, have Open Match components talk to this redis address instead.
install: true
# If open-match-core.redis.install is set to false, have Open Match components talk to this redis address instead.
# Otherwise the default is set to the om-redis instance.
hostname: # Your redis server address
port: 6379
@ -170,8 +166,6 @@ open-match-core:
maxActive: 0
idleTimeout: 0
healthCheckTimeout: 300ms
swaggerui:
enabled: true
# Controls if users need to install scale testing setup for Open Match.
open-match-scale:
@ -241,7 +235,7 @@ global:
# Use this field if you need to override the image registry and image tag for all services defined in this chart
image:
registry: gcr.io/open-match-public-images
tag: 0.10.0
tag: 0.9.0
pullPolicy: Always

@ -25,11 +25,11 @@ import (
"github.com/golang/protobuf/jsonpb"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/ipb"
"open-match.dev/open-match/internal/omerror"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/internal/telemetry"
@ -67,38 +67,39 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
return status.Error(codes.InvalidArgument, ".profile is required")
}
// Error group for handling the synchronizer calls only.
eg, ctx := errgroup.WithContext(stream.Context())
syncStream, err := s.synchronizer.synchronize(ctx)
syncStream, err := s.synchronizer.synchronize(stream.Context())
if err != nil {
return err
}
// The mmf must be canceled if the synchronizer call fails (which will
// cancel the context from the error group). However the synchronizer call
// is NOT dependent on the mmf call.
mmfCtx, cancelMmfs := context.WithCancel(ctx)
mmfCtx, cancelMmfs := context.WithCancel(stream.Context())
// Closed when mmfs should start.
startMmfs := make(chan struct{})
proposals := make(chan *pb.Match)
m := &sync.Map{}
eg.Go(func() error {
return synchronizeSend(ctx, syncStream, m, proposals)
})
eg.Go(func() error {
return synchronizeRecv(ctx, syncStream, m, stream, startMmfs, cancelMmfs)
synchronizerWait := omerror.WaitOnErrors(logger, func() error {
return synchronizeSend(stream.Context(), syncStream, m, proposals)
}, func() error {
return synchronizeRecv(syncStream, m, stream, startMmfs, cancelMmfs)
})
var mmfErr error
select {
case <-mmfCtx.Done():
mmfErr = fmt.Errorf("mmf was never started")
case <-startMmfs:
mmfErr = callMmf(mmfCtx, s.cc, req, proposals)
}
mmfWait := omerror.WaitOnErrors(logger, func() error {
select {
case <-mmfCtx.Done():
return fmt.Errorf("Mmf was never started")
case <-startMmfs:
}
syncErr := eg.Wait()
return callMmf(mmfCtx, s.cc, req, proposals)
})
syncErr := synchronizerWait()
// Fetch Matches should never block on just the match function.
// Must cancel mmfs after synchronizer is done and before checking mmf error
// because the synchronizer call could fail while the mmf call blocks.
cancelMmfs()
mmfErr := mmfWait()
// TODO: Send mmf error in FetchSummary instead of erroring call.
if syncErr != nil || mmfErr != nil {
@ -108,7 +109,7 @@ func (s *backendService) FetchMatches(req *pb.FetchMatchesRequest, stream pb.Bac
}).Error("error(s) in FetchMatches call.")
return fmt.Errorf(
"error(s) in FetchMatches call. syncErr=[%s], mmfErr=[%s]",
"Error(s) in FetchMatches call. syncErr=[%s], mmfErr=[%s]",
syncErr,
mmfErr,
)
@ -127,10 +128,7 @@ sendProposals:
if !ok {
break sendProposals
}
id, loaded := m.LoadOrStore(p.GetMatchId(), p)
if loaded {
return fmt.Errorf("found duplicate matchID %s returned from MMF", id)
}
m.Store(p.GetMatchId(), p)
telemetry.RecordUnitMeasurement(ctx, mMatchesSentToEvaluation)
err := syncStream.Send(&ipb.SynchronizeRequest{Proposal: p})
if err != nil {
@ -146,7 +144,7 @@ sendProposals:
return nil
}
func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs context.CancelFunc) error {
func synchronizeRecv(syncStream synchronizerStream, m *sync.Map, stream pb.BackendService_FetchMatchesServer, startMmfs chan<- struct{}, cancelMmfs context.CancelFunc) error {
var startMmfsOnce sync.Once
for {
@ -169,7 +167,7 @@ func synchronizeRecv(ctx context.Context, syncStream synchronizerStream, m *sync
}
if match, ok := m.Load(resp.GetMatchId()); ok {
telemetry.RecordUnitMeasurement(ctx, mMatchesFetched)
telemetry.RecordUnitMeasurement(stream.Context(), mMatchesFetched)
err = stream.Send(&pb.FetchMatchesResponse{Match: match.(*pb.Match)})
if err != nil {
return fmt.Errorf("error sending match to caller of backend: %w", err)
@ -306,35 +304,23 @@ func (s *backendService) ReleaseTickets(ctx context.Context, req *pb.ReleaseTick
// AssignTickets overwrites the Assignment field of the input TicketIds.
func (s *backendService) AssignTickets(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
resp, err := doAssignTickets(ctx, req, s.store)
err := doAssignTickets(ctx, req, s.store)
if err != nil {
logger.WithError(err).Error("failed to update assignments for requested tickets")
return nil, err
}
numIds := 0
for _, ag := range req.Assignments {
numIds += len(ag.TicketIds)
}
telemetry.RecordNUnitMeasurement(ctx, mTicketsAssigned, int64(numIds))
return resp, nil
telemetry.RecordNUnitMeasurement(ctx, mTicketsAssigned, int64(len(req.TicketIds)))
return &pb.AssignTicketsResponse{}, nil
}
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) (*pb.AssignTicketsResponse, error) {
resp, err := store.UpdateAssignments(ctx, req)
func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store statestore.Service) error {
err := store.UpdateAssignments(ctx, req.GetTicketIds(), req.GetAssignment())
if err != nil {
logger.WithError(err).Error("failed to update assignments")
return nil, err
return err
}
ids := []string{}
for _, ag := range req.Assignments {
ids = append(ids, ag.TicketIds...)
}
for _, id := range ids {
for _, id := range req.GetTicketIds() {
err = store.DeindexTicket(ctx, id)
// Try to deindex all input tickets. Log without returning an error if the deindexing operation failed.
// TODO: consider retrying the index operation
@ -343,13 +329,13 @@ func doAssignTickets(ctx context.Context, req *pb.AssignTicketsRequest, store st
}
}
if err = store.DeleteTicketsFromIgnoreList(ctx, ids); err != nil {
if err = store.DeleteTicketsFromIgnoreList(ctx, req.GetTicketIds()); err != nil {
logger.WithFields(logrus.Fields{
"ticket_ids": ids,
"ticket_ids": req.GetTicketIds(),
}).Error(err)
}
return resp, nil
return nil
}
func doReleasetickets(ctx context.Context, req *pb.ReleaseTicketsRequest, store statestore.Service) error {

@ -0,0 +1,309 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package backend
import (
"context"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/statestore"
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
)
func TestDoReleaseTickets(t *testing.T) {
fakeProperty := "test-property"
fakeTickets := []*pb.Ticket{
{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 1,
},
},
},
{
Id: "2",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 2,
},
},
},
{
Id: "3",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 2,
},
},
},
}
tests := []struct {
description string
preAction func(context.Context, context.CancelFunc, statestore.Service, *pb.Pool)
req *pb.ReleaseTicketsRequest
wantCode codes.Code
pool *pb.Pool
expectTickets []string
}{
{
description: "expect unavailable code since context is canceled before being called",
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service, pool *pb.Pool) {
cancel()
},
req: &pb.ReleaseTicketsRequest{
TicketIds: []string{"1"},
},
wantCode: codes.Unavailable,
},
{
description: "expect ok code when submitted list is empty",
pool: &pb.Pool{DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}}},
expectTickets: []string{"3"},
req: &pb.ReleaseTicketsRequest{
TicketIds: []string{},
},
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service, pool *pb.Pool) {
for _, fakeTicket := range fakeTickets {
store.CreateTicket(ctx, fakeTicket)
store.IndexTicket(ctx, fakeTicket)
}
// Make sure tickets are correctly indexed.
var wantFilteredTickets []*pb.Ticket
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
wantFilteredTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
// Ignore a few tickets
err = store.AddTicketsToIgnoreList(ctx, []string{"1", "2"})
assert.Nil(t, err)
// Make sure it was properly ignored
var ignoredFilterTickets []*pb.Ticket
err = store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
ignoredFilterTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets)-2, len(ignoredFilterTickets))
},
wantCode: codes.OK,
},
{
description: "expect ok code",
pool: &pb.Pool{DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}}},
wantCode: codes.OK,
expectTickets: []string{"1", "2"},
req: &pb.ReleaseTicketsRequest{
TicketIds: []string{"1", "2"},
},
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service, pool *pb.Pool) {
for _, fakeTicket := range fakeTickets {
store.CreateTicket(ctx, fakeTicket)
store.IndexTicket(ctx, fakeTicket)
}
// Make sure tickets are correctly indexed.
var wantFilteredTickets []*pb.Ticket
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
wantFilteredTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
// Ignore all the tickets
err = store.AddTicketsToIgnoreList(ctx, []string{"1", "2", "3"})
assert.Nil(t, err)
// Make sure it was properly ignored
var ignoredFilterTickets []*pb.Ticket
err = store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
ignoredFilterTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets)-3, len(ignoredFilterTickets))
},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
test.preAction(ctx, cancel, store, test.pool)
err := doReleasetickets(ctx, test.req, store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
// Make sure that the expected tickets are available for query
var filteredTickets []*pb.Ticket
err = store.FilterTickets(ctx, test.pool, 10, func(filterTickets []*pb.Ticket) error {
filteredTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(filteredTickets), len(test.expectTickets))
for _, ticket := range filteredTickets {
assert.Contains(t, test.expectTickets, ticket.GetId())
}
}
})
}
}
func TestDoAssignTickets(t *testing.T) {
fakeProperty := "test-property"
fakeTickets := []*pb.Ticket{
{
Id: "1",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 1,
},
},
},
{
Id: "2",
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
fakeProperty: 2,
},
},
},
}
tests := []struct {
description string
preAction func(context.Context, context.CancelFunc, statestore.Service)
req *pb.AssignTicketsRequest
wantCode codes.Code
wantAssignment *pb.Assignment
}{
{
description: "expect unavailable code since context is canceled before being called",
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
cancel()
},
req: &pb.AssignTicketsRequest{
TicketIds: []string{"1"},
Assignment: &pb.Assignment{},
},
wantCode: codes.Unavailable,
},
{
description: "expect invalid argument code since assignment is nil",
preAction: func(_ context.Context, cancel context.CancelFunc, _ statestore.Service) {
cancel()
},
req: &pb.AssignTicketsRequest{},
wantCode: codes.InvalidArgument,
},
{
description: "expect not found code since ticket does not exist",
preAction: func(_ context.Context, _ context.CancelFunc, _ statestore.Service) {},
req: &pb.AssignTicketsRequest{
TicketIds: []string{"1", "2"},
Assignment: &pb.Assignment{
Connection: "123",
},
},
wantCode: codes.NotFound,
},
{
description: "expect ok code",
preAction: func(ctx context.Context, cancel context.CancelFunc, store statestore.Service) {
for _, fakeTicket := range fakeTickets {
store.CreateTicket(ctx, fakeTicket)
store.IndexTicket(ctx, fakeTicket)
}
// Make sure tickets are correctly indexed.
var wantFilteredTickets []*pb.Ticket
pool := &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 3}},
}
err := store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
wantFilteredTickets = filterTickets
return nil
})
assert.Nil(t, err)
assert.Equal(t, len(fakeTickets), len(wantFilteredTickets))
},
req: &pb.AssignTicketsRequest{
TicketIds: []string{"1", "2"},
Assignment: &pb.Assignment{
Connection: "123",
},
},
wantCode: codes.OK,
wantAssignment: &pb.Assignment{
Connection: "123",
},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
cfg := viper.New()
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
test.preAction(ctx, cancel, store)
err := doAssignTickets(ctx, test.req, store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
for _, id := range test.req.GetTicketIds() {
ticket, err := store.GetTicket(ctx, id)
assert.Nil(t, err)
assert.Equal(t, test.wantAssignment, ticket.GetAssignment())
}
// Make sure tickets are deindexed after assignment
var wantFilteredTickets []*pb.Ticket
pool := &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{{DoubleArg: fakeProperty, Min: 0, Max: 2}},
}
store.FilterTickets(ctx, pool, 10, func(filterTickets []*pb.Ticket) error {
wantFilteredTickets = filterTickets
return nil
})
assert.Nil(t, wantFilteredTickets)
}
})
}
}
// TODOs: add unit tests to doFetchMatchesFilterSkiplistIds and doFetchMatchesAddSkiplistIds

@ -18,8 +18,6 @@ import (
"context"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
"github.com/rs/xid"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@ -53,22 +51,16 @@ var (
// A ticket is considered as ready for matchmaking once it is created.
// - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
// - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.
func (s *frontendService) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.Ticket, error) {
func (s *frontendService) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.CreateTicketResponse, error) {
// Perform input validation.
if req.Ticket == nil {
if req.GetTicket() == nil {
return nil, status.Errorf(codes.InvalidArgument, ".ticket is required")
}
if req.Ticket.Assignment != nil {
return nil, status.Errorf(codes.InvalidArgument, "tickets cannot be created with an assignment")
}
if req.Ticket.CreateTime != nil {
return nil, status.Errorf(codes.InvalidArgument, "tickets cannot be created with create time set")
}
return doCreateTicket(ctx, req, s.store)
}
func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store statestore.Service) (*pb.Ticket, error) {
func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store statestore.Service) (*pb.CreateTicketResponse, error) {
// Generate a ticket id and create a Ticket in state storage
ticket, ok := proto.Clone(req.Ticket).(*pb.Ticket)
if !ok {
@ -76,7 +68,6 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
}
ticket.Id = xid.New().String()
ticket.CreateTime = ptypes.TimestampNow()
err := store.CreateTicket(ctx, ticket)
if err != nil {
logger.WithFields(logrus.Fields{
@ -96,20 +87,20 @@ func doCreateTicket(ctx context.Context, req *pb.CreateTicketRequest, store stat
}
telemetry.RecordUnitMeasurement(ctx, mTicketsCreated)
return ticket, nil
return &pb.CreateTicketResponse{Ticket: ticket}, nil
}
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*empty.Empty, error) {
func (s *frontendService) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*pb.DeleteTicketResponse, error) {
err := doDeleteTicket(ctx, req.GetTicketId(), s.store)
if err != nil {
return nil, err
}
telemetry.RecordUnitMeasurement(ctx, mTicketsDeleted)
return &empty.Empty{}, nil
return &pb.DeleteTicketResponse{}, nil
}
func doDeleteTicket(ctx context.Context, id string, store statestore.Service) error {
@ -167,9 +158,9 @@ func doGetTickets(ctx context.Context, id string, store statestore.Service) (*pb
return ticket, nil
}
// WatchAssignments stream back Assignment of the specified TicketId if it is updated.
// GetAssignments stream back Assignment of the specified TicketId if it is updated.
// - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
func (s *frontendService) WatchAssignments(req *pb.WatchAssignmentsRequest, stream pb.FrontendService_WatchAssignmentsServer) error {
func (s *frontendService) GetAssignments(req *pb.GetAssignmentsRequest, stream pb.FrontendService_GetAssignmentsServer) error {
ctx := stream.Context()
for {
select {
@ -178,14 +169,14 @@ func (s *frontendService) WatchAssignments(req *pb.WatchAssignmentsRequest, stre
default:
sender := func(assignment *pb.Assignment) error {
telemetry.RecordUnitMeasurement(ctx, mTicketAssignmentsRetrieved)
return stream.Send(&pb.WatchAssignmentsResponse{Assignment: assignment})
return stream.Send(&pb.GetAssignmentsResponse{Assignment: assignment})
}
return doWatchAssignments(ctx, req.GetTicketId(), sender, s.store)
return doGetAssignments(ctx, req.GetTicketId(), sender, s.store)
}
}
}
func doWatchAssignments(ctx context.Context, id string, sender func(*pb.Assignment) error, store statestore.Service) error {
func doGetAssignments(ctx context.Context, id string, sender func(*pb.Assignment) error, store statestore.Service) error {
var currAssignment *pb.Assignment
var ok bool
callback := func(assignment *pb.Assignment) error {

@ -68,7 +68,6 @@ func TestDoCreateTickets(t *testing.T) {
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
@ -79,16 +78,16 @@ func TestDoCreateTickets(t *testing.T) {
res, err := doCreateTicket(ctx, &pb.CreateTicketRequest{Ticket: test.ticket}, store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
if err == nil {
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetId())
matched, err := regexp.MatchString(`[0-9a-v]{20}`, res.GetTicket().GetId())
assert.True(t, matched)
assert.Nil(t, err)
assert.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.SearchFields.DoubleArgs["test-arg"])
assert.Equal(t, test.ticket.SearchFields.DoubleArgs["test-arg"], res.Ticket.SearchFields.DoubleArgs["test-arg"])
}
})
}
}
func TestDoWatchAssignments(t *testing.T) {
func TestDoGetAssignments(t *testing.T) {
testTicket := &pb.Ticket{
Id: "test-id",
}
@ -123,15 +122,7 @@ func TestDoWatchAssignments(t *testing.T) {
go func(wg *sync.WaitGroup) {
for i := 0; i < len(wantAssignments); i++ {
time.Sleep(50 * time.Millisecond)
_, err := store.UpdateAssignments(ctx, &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{testTicket.GetId()},
Assignment: wantAssignments[i],
},
},
})
assert.Nil(t, err)
assert.Nil(t, store.UpdateAssignments(ctx, []string{testTicket.GetId()}, wantAssignments[i]))
wg.Done()
}
}(wg)
@ -154,7 +145,7 @@ func TestDoWatchAssignments(t *testing.T) {
gotAssignments := []*pb.Assignment{}
test.preAction(ctx, t, store, test.wantAssignments, &wg)
err := doWatchAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
err := doGetAssignments(ctx, testTicket.GetId(), senderGenerator(gotAssignments, len(test.wantAssignments)), store)
assert.Equal(t, test.wantCode, status.Convert(err).Code())
wg.Wait()
@ -202,7 +193,6 @@ func TestDoDeleteTicket(t *testing.T) {
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())
@ -256,7 +246,6 @@ func TestDoGetTicket(t *testing.T) {
}
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(utilTesting.NewContext(t))
store, closer := statestoreTesting.NewStoreServiceForTesting(t, viper.New())

@ -18,16 +18,19 @@ import (
"google.golang.org/grpc"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
)
// BindService creates the query service and binds it to the serving harness.
func BindService(p *rpc.ServerParams, cfg config.View) error {
service := &queryService{
cfg: cfg,
tc: newTicketCache(p, cfg),
cfg: cfg,
store: statestore.New(cfg),
}
p.AddHealthCheckFunc(service.store.HealthCheck)
p.AddHandleFunc(func(s *grpc.Server) {
pb.RegisterQueryServiceServer(s, service)
}, pb.RegisterQueryServiceHandlerFromEndpoint)

@ -16,17 +16,14 @@ package query
import (
"context"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/filter"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/internal/statestore"
"open-match.dev/open-match/pkg/pb"
"open-match.dev/open-match/internal/statestore"
)
var (
@ -39,91 +36,43 @@ var (
// queryService API provides utility functions for common MMF functionality such
// as retrieving Tickets from state storage.
type queryService struct {
cfg config.View
tc *ticketCache
cfg config.View
store statestore.Service
}
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `storage.pool.size` and streams back the responses.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and a maximum of 10000
func (s *queryService) QueryTickets(req *pb.QueryTicketsRequest, responseServer pb.QueryService_QueryTicketsServer) error {
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
}
pf, err := filter.NewPoolFilter(pool)
if err != nil {
return err
}
var results []*pb.Ticket
err = s.tc.request(responseServer.Context(), func(tickets map[string]*pb.Ticket) {
for _, ticket := range tickets {
if pf.In(ticket) {
results = append(results, ticket)
}
}
})
if err != nil {
logger.WithError(err).Error("Failed to run request.")
return err
}
ctx := responseServer.Context()
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
end := start + pSize
if end > len(results) {
end = len(results)
}
err := responseServer.Send(&pb.QueryTicketsResponse{
Tickets: results[start:end],
})
callback := func(tickets []*pb.Ticket) error {
err := responseServer.Send(&pb.QueryTicketsResponse{Tickets: tickets})
if err != nil {
return err
logger.WithError(err).Error("Failed to send Redis response to grpc server")
return status.Errorf(codes.Aborted, err.Error())
}
return nil
}
return nil
return doQueryTickets(ctx, pool, pSize, callback, s.store)
}
func (s *queryService) QueryTicketIds(req *pb.QueryTicketIdsRequest, responseServer pb.QueryService_QueryTicketIdsServer) error {
pool := req.GetPool()
if pool == nil {
return status.Error(codes.InvalidArgument, ".pool is required")
}
pf, err := filter.NewPoolFilter(pool)
func doQueryTickets(ctx context.Context, pool *pb.Pool, pageSize int, sender func(tickets []*pb.Ticket) error, store statestore.Service) error {
// Send requests to the storage service
err := store.FilterTickets(ctx, pool, pageSize, sender)
if err != nil {
logger.WithError(err).Error("Failed to retrieve result from storage service.")
return err
}
var results []string
err = s.tc.request(responseServer.Context(), func(tickets map[string]*pb.Ticket) {
for id, ticket := range tickets {
if pf.In(ticket) {
results = append(results, id)
}
}
})
if err != nil {
logger.WithError(err).Error("Failed to run request.")
return err
}
pSize := getPageSize(s.cfg)
for start := 0; start < len(results); start += pSize {
end := start + pSize
if end > len(results) {
end = len(results)
}
err := responseServer.Send(&pb.QueryTicketIdsResponse{
Ids: results[start:end],
})
if err != nil {
return err
}
}
return nil
}
@ -158,153 +107,3 @@ func getPageSize(cfg config.View) int {
return pSize
}
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
// ticketCache unifies concurrent requests into a single cache update, and
// gives a safe view into that map cache.
type ticketCache struct {
store statestore.Service
requests chan *cacheRequest
// Single item buffered channel. Holds a value when runQuery can be safely
// started. Basically a channel/select friendly mutex around runQuery
// running.
startRunRequest chan struct{}
wg sync.WaitGroup
// Multithread-unsafe fields, only to be written by update, and read by
// request once it has been given the ok.
tickets map[string]*pb.Ticket
err error
}
func newTicketCache(p *rpc.ServerParams, cfg config.View) *ticketCache {
tc := &ticketCache{
store: statestore.New(cfg),
requests: make(chan *cacheRequest),
startRunRequest: make(chan struct{}, 1),
tickets: make(map[string]*pb.Ticket),
}
tc.startRunRequest <- struct{}{}
p.AddHealthCheckFunc(tc.store.HealthCheck)
return tc
}
type cacheRequest struct {
ctx context.Context
runNow chan struct{}
}
func (tc *ticketCache) request(ctx context.Context, f func(map[string]*pb.Ticket)) error {
cr := &cacheRequest{
ctx: ctx,
runNow: make(chan struct{}),
}
sendRequest:
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled before request sent.")
case <-tc.startRunRequest:
go tc.runRequest()
case tc.requests <- cr:
break sendRequest
}
}
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "ticket cache request canceled waiting for access.")
case <-cr.runNow:
defer tc.wg.Done()
}
if tc.err != nil {
return tc.err
}
f(tc.tickets)
return nil
}
func (tc *ticketCache) runRequest() {
defer func() {
tc.startRunRequest <- struct{}{}
}()
// Wait for first query request.
reqs := []*cacheRequest{<-tc.requests}
// Collect all waiting queries.
collectAllWaiting:
for {
select {
case req := <-tc.requests:
reqs = append(reqs, req)
default:
break collectAllWaiting
}
}
tc.update()
// Send WaitGroup to query calls, letting them run their query on the ticket
// cache.
for _, req := range reqs {
tc.wg.Add(1)
select {
case req.runNow <- struct{}{}:
case <-req.ctx.Done():
tc.wg.Done()
}
}
// wait for requests to finish using ticket cache.
tc.wg.Wait()
}
func (tc *ticketCache) update() {
previousCount := len(tc.tickets)
currentAll, err := tc.store.GetIndexedIDSet(context.Background())
if err != nil {
tc.err = err
return
}
deletedCount := 0
for id := range tc.tickets {
if _, ok := currentAll[id]; !ok {
delete(tc.tickets, id)
deletedCount++
}
}
toFetch := []string{}
for id := range currentAll {
if _, ok := tc.tickets[id]; !ok {
toFetch = append(toFetch, id)
}
}
newTickets, err := tc.store.GetTickets(context.Background(), toFetch)
if err != nil {
tc.err = err
return
}
for _, t := range newTickets {
tc.tickets[t.Id] = t
}
logger.Debugf("Ticket Cache update: Previous %d, Deleted %d, Fetched %d, Current %d", previousCount, deletedCount, len(toFetch), len(tc.tickets))
tc.err = nil
}
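// Illustrative sketch only (not part of the change set): how a caller is
// expected to use the ticket cache. The predicate argument stands in for the
// pool filter used by doQueryTickets above; the names are placeholders.
func exampleCountCachedTickets(ctx context.Context, tc *ticketCache, in func(*pb.Ticket) bool) (int, error) {
	count := 0
	// request blocks until the next shared cache update completes, then runs
	// the callback against a consistent, read-only view of the ticket map.
	err := tc.request(ctx, func(tickets map[string]*pb.Ticket) {
		for _, t := range tickets {
			if in(t) {
				count++
			}
		}
	})
	return count, err
}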

@ -15,12 +15,139 @@
package query
import (
"context"
"errors"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/statestore"
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
internalTesting "open-match.dev/open-match/internal/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
)
func TestDoQueryTickets(t *testing.T) {
const (
DoubleArg1 = "level"
DoubleArg2 = "spd"
)
var actualTickets []*pb.Ticket
fakeErr := errors.New("some error")
senderGenerator := func(err error) func(tickets []*pb.Ticket) error {
return func(tickets []*pb.Ticket) error {
if err != nil {
return err
}
actualTickets = tickets
return err
}
}
testTickets := internalTesting.GenerateFloatRangeTickets(
internalTesting.Property{Name: DoubleArg1, Min: 0, Max: 20, Interval: 5},
internalTesting.Property{Name: DoubleArg2, Min: 0, Max: 20, Interval: 5},
)
tests := []struct {
description string
sender func(tickets []*pb.Ticket) error
pool *pb.Pool
pageSize int
action func(context.Context, *testing.T, statestore.Service)
wantErr error
wantTickets []*pb.Ticket
}{
{
"expect empty response from an empty store",
senderGenerator(nil),
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: DoubleArg1,
Min: 0,
Max: 10,
},
},
},
100,
func(_ context.Context, _ *testing.T, _ statestore.Service) {},
nil,
nil,
},
{
"expect tickets with DoubleArg1 value in range of [0, 10] (inclusively)",
senderGenerator(nil),
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: DoubleArg1,
Min: 0,
Max: 10,
},
},
},
100,
func(ctx context.Context, t *testing.T, store statestore.Service) {
for _, testTicket := range testTickets {
assert.Nil(t, store.CreateTicket(ctx, testTicket))
assert.Nil(t, store.IndexTicket(ctx, testTicket))
}
},
nil,
internalTesting.GenerateFloatRangeTickets(
internalTesting.Property{Name: DoubleArg1, Min: 0, Max: 10.1, Interval: 5},
internalTesting.Property{Name: DoubleArg2, Min: 0, Max: 20, Interval: 5},
),
},
{
"expect error from canceled context",
senderGenerator(fakeErr),
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: DoubleArg1,
Min: 0,
Max: 10,
},
},
},
100,
func(ctx context.Context, t *testing.T, store statestore.Service) {
for _, testTicket := range testTickets {
assert.Nil(t, store.CreateTicket(ctx, testTicket))
assert.Nil(t, store.IndexTicket(ctx, testTicket))
}
},
status.Errorf(codes.Internal, "%v", fakeErr),
nil,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
cfg := viper.New()
cfg.Set("storage.page.size", 1000)
store, closer := statestoreTesting.NewStoreServiceForTesting(t, cfg)
defer closer()
ctx := utilTesting.NewContext(t)
test.action(ctx, t, store)
assert.Equal(t, test.wantErr, doQueryTickets(ctx, test.pool, test.pageSize, test.sender, store))
for _, wantTicket := range test.wantTickets {
assert.Contains(t, actualTickets, wantTicket)
}
})
}
}
func TestGetPageSize(t *testing.T) {
testCases := []struct {
name string
@ -56,7 +183,6 @@ func TestGetPageSize(t *testing.T) {
}
for _, tt := range testCases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
cfg := viper.New()
tt.configure(cfg)

@ -24,10 +24,11 @@ import (
"github.com/golang/protobuf/jsonpb"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/omerror"
"open-match.dev/open-match/internal/rpc"
"open-match.dev/open-match/pkg/pb"
)
@ -43,7 +44,7 @@ type evaluator interface {
evaluate(context.Context, <-chan []*pb.Match) ([]string, error)
}
var errNoEvaluatorType = status.Errorf(codes.FailedPrecondition, "unable to determine evaluator type, either api.evaluator.grpcport or api.evaluator.httpport must be specified in the config")
var errNoEvaluatorType = grpc.Errorf(codes.FailedPrecondition, "unable to determine evaluator type, either api.evaluator.grpcport or api.evaluator.httpport must be specified in the config")
func newEvaluator(cfg config.View) evaluator {
newInstance := func(cfg config.View) (interface{}, func(), error) {
@ -87,7 +88,7 @@ func newGrpcEvaluator(cfg config.View) (evaluator, func(), error) {
grpcAddr := fmt.Sprintf("%s:%d", cfg.GetString("api.evaluator.hostname"), cfg.GetInt64("api.evaluator.grpcport"))
conn, err := rpc.GRPCClientFromEndpoint(cfg, grpcAddr)
if err != nil {
return nil, nil, fmt.Errorf("failed to create grpc evaluator client: %w", err)
return nil, nil, fmt.Errorf("Failed to create grpc evaluator client: %w", err)
}
evaluatorClientLogger.WithFields(logrus.Fields{
@ -107,26 +108,20 @@ func newGrpcEvaluator(cfg config.View) (evaluator, func(), error) {
}
func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Match) ([]string, error) {
eg, ctx := errgroup.WithContext(ctx)
var stream pb.Evaluator_EvaluateClient
{ // prevent shadowing err later
var err error
stream, err = ec.evaluator.Evaluate(ctx)
if err != nil {
return nil, fmt.Errorf("error starting evaluator call: %w", err)
return nil, fmt.Errorf("Error starting evaluator call: %w", err)
}
}
results := []string{}
matchIDs := &sync.Map{}
eg.Go(func() error {
wait := omerror.WaitOnErrors(evaluatorClientLogger, func() error {
for proposals := range pc {
for _, proposal := range proposals {
if _, ok := matchIDs.LoadOrStore(proposal.GetMatchId(), true); ok {
return fmt.Errorf("found duplicate matchID %s", proposal.GetMatchId())
}
if err := stream.Send(&pb.EvaluateRequest{Match: proposal}); err != nil {
return fmt.Errorf("failed to send request to evaluator, desc: %w", err)
}
@ -137,9 +132,7 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
return fmt.Errorf("failed to close the send direction of evaluator stream, desc: %w", err)
}
return nil
})
eg.Go(func() error {
}, func() error {
for {
// TODO: add grpc timeouts for this call.
resp, err := stream.Recv()
@ -149,19 +142,11 @@ func (ec *grcpEvaluatorClient) evaluate(ctx context.Context, pc <-chan []*pb.Mat
if err != nil {
return fmt.Errorf("failed to get response from evaluator client, desc: %w", err)
}
v, ok := matchIDs.LoadOrStore(resp.GetMatchId(), false)
if !ok {
return fmt.Errorf("evaluator returned unmatched matchID %s which does not correspond to its input", resp.GetMatchId())
}
if !v.(bool) {
return fmt.Errorf("evaluator returned duplicated matchID %s", resp.GetMatchId())
}
results = append(results, resp.GetMatchId())
}
})
err := eg.Wait()
err := wait()
if err != nil {
return nil, err
}

@ -133,10 +133,8 @@ var getTests = []struct {
},
}
//nolint: gocritic, staticcheck
func Test_Get(t *testing.T) {
for _, tt := range getTests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
if tt.verifySame == nil {
tt.verifySame = func(a, b interface{}) bool {

@ -17,10 +17,9 @@ package config
import (
"fmt"
"log"
"github.com/fsnotify/fsnotify"
"github.com/spf13/viper"
"log"
)
// Read sets default to a viper instance and read user config to override these defaults.

@ -1,136 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package filter defines which tickets pass which filters. Other implementations which help
// filter tickets (eg, a range index lookup) must conform to the same set of tickets being within
// the filter as here.
package filter
import (
"time"
"github.com/golang/protobuf/ptypes"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/pkg/pb"
)
var emptySearchFields = &pb.SearchFields{}
var (
logger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
"component": "filter",
})
)
// PoolFilter contains all the filtering criteria from a Pool that the Ticket
// needs to meet to belong to that Pool.
type PoolFilter struct {
DoubleRangeFilters []*pb.DoubleRangeFilter
StringEqualsFilters []*pb.StringEqualsFilter
TagPresentFilters []*pb.TagPresentFilter
CreatedBefore time.Time
CreatedAfter time.Time
}
// NewPoolFilter validates a Pool's filtering criteria and returns a PoolFilter.
func NewPoolFilter(pool *pb.Pool) (*PoolFilter, error) {
var ca, cb time.Time
var err error
if pool.GetCreatedBefore() != nil {
if cb, err = ptypes.Timestamp(pool.GetCreatedBefore()); err != nil {
return nil, status.Error(codes.InvalidArgument, ".invalid created_before value")
}
}
if pool.GetCreatedAfter() != nil {
if ca, err = ptypes.Timestamp(pool.GetCreatedAfter()); err != nil {
return nil, status.Error(codes.InvalidArgument, ".invalid created_after value")
}
}
return &PoolFilter{
DoubleRangeFilters: pool.GetDoubleRangeFilters(),
StringEqualsFilters: pool.GetStringEqualsFilters(),
TagPresentFilters: pool.GetTagPresentFilters(),
CreatedBefore: cb,
CreatedAfter: ca,
}, nil
}
// In returns true if the Ticket meets all the criteria for this PoolFilter.
func (pf *PoolFilter) In(ticket *pb.Ticket) bool {
s := ticket.GetSearchFields()
if s == nil {
s = emptySearchFields
}
if !pf.CreatedAfter.IsZero() || !pf.CreatedBefore.IsZero() {
// CreateTime is only populated by Open Match and hence expected to be valid.
if ct, err := ptypes.Timestamp(ticket.CreateTime); err == nil {
if !pf.CreatedAfter.IsZero() {
if !ct.After(pf.CreatedAfter) {
return false
}
}
if !pf.CreatedBefore.IsZero() {
if !ct.Before(pf.CreatedBefore) {
return false
}
}
} else {
logger.WithFields(logrus.Fields{
"error": err.Error(),
"id": ticket.GetId(),
}).Error("failed to get time from Timestamp proto")
}
}
for _, f := range pf.DoubleRangeFilters {
v, ok := s.DoubleArgs[f.DoubleArg]
if !ok {
return false
}
// Not simplified so that NaN cases are handled correctly.
if !(v >= f.Min && v <= f.Max) {
return false
}
}
for _, f := range pf.StringEqualsFilters {
v, ok := s.StringArgs[f.StringArg]
if !ok {
return false
}
if f.Value != v {
return false
}
}
outer:
for _, f := range pf.TagPresentFilters {
for _, v := range s.Tags {
if v == f.Tag {
continue outer
}
}
return false
}
return true
}
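// Illustrative sketch only (not part of the original file): validating a Pool
// once and reusing the resulting PoolFilter to test candidate Tickets.
func examplePoolMembership(pool *pb.Pool, tickets []*pb.Ticket) ([]*pb.Ticket, error) {
	pf, err := NewPoolFilter(pool)
	if err != nil {
		return nil, err
	}
	matched := []*pb.Ticket{}
	for _, t := range tickets {
		if pf.In(t) {
			matched = append(matched, t)
		}
	}
	return matched, nil
}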

@ -1,92 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filter
import (
"testing"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/filter/testcases"
"open-match.dev/open-match/pkg/pb"
)
func TestMeetsCriteria(t *testing.T) {
for _, tc := range testcases.IncludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.Pool)
if err != nil {
t.Error("pool should be valid")
}
tc.Ticket.CreateTime = ptypes.TimestampNow()
if !pf.In(tc.Ticket) {
t.Error("ticket should be included in the pool")
}
})
}
for _, tc := range testcases.ExcludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.Pool)
if err != nil {
t.Error("pool should be valid")
}
tc.Ticket.CreateTime = ptypes.TimestampNow()
if pf.In(tc.Ticket) {
t.Error("ticket should be excluded from the pool")
}
})
}
}
func TestValidPoolFilter(t *testing.T) {
for _, tc := range []struct {
name string
pool *pb.Pool
code codes.Code
msg string
}{
{
"invalid create before",
&pb.Pool{
CreatedBefore: &timestamp.Timestamp{Nanos: -1},
},
codes.InvalidArgument,
".invalid created_before value",
},
{
"invalid create after",
&pb.Pool{
CreatedAfter: &timestamp.Timestamp{Nanos: -1},
},
codes.InvalidArgument,
".invalid created_after value",
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
pf, err := NewPoolFilter(tc.pool)
assert.Nil(t, pf)
s := status.Convert(err)
assert.Equal(t, tc.code, s.Code())
assert.Equal(t, tc.msg, s.Message())
})
}
}

@ -1,413 +0,0 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testcases contains lists of ticket filtering test cases.
package testcases
import (
"fmt"
"math"
"time"
"github.com/golang/protobuf/ptypes"
tspb "github.com/golang/protobuf/ptypes/timestamp"
"open-match.dev/open-match/pkg/pb"
)
// TestCase defines a single filtering test case to run.
type TestCase struct {
Name string
Ticket *pb.Ticket
Pool *pb.Pool
}
// IncludedTestCases returns a list of test cases where using the given filter,
// the ticket is included in the result.
func IncludedTestCases() []TestCase {
now := time.Now()
return []TestCase{
{
"no filters or fields",
&pb.Ticket{},
&pb.Pool{},
},
simpleDoubleRange("simpleInRange", 5, 0, 10),
simpleDoubleRange("exactMatch", 5, 5, 5),
simpleDoubleRange("infinityMax", math.Inf(1), 0, math.Inf(1)),
simpleDoubleRange("infinityMin", math.Inf(-1), math.Inf(-1), 0),
{
"String equals simple positive",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
},
&pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: "field",
Value: "value",
},
},
},
},
{
"TagPresent simple positive",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"mytag",
},
},
},
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: "mytag",
},
},
},
},
{
"TagPresent multiple all present",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
},
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: "A",
},
{
Tag: "C",
},
{
Tag: "B",
},
},
},
},
multipleFilters(true, true, true),
{
"CreatedBefore simple positive",
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
},
},
{
"CreatedAfter simple positive",
&pb.Ticket{},
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
},
},
{
"Between CreatedBefore and CreatedAfter positive",
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 1)),
CreatedAfter: timestamp(now.Add(time.Hour * -1)),
},
},
{
"No time search criteria positive",
&pb.Ticket{},
&pb.Pool{},
},
}
}
// ExcludedTestCases returns a list of test cases where using the given filter,
// the ticket is NOT included in the result.
func ExcludedTestCases() []TestCase {
now := time.Now()
return []TestCase{
{
"DoubleRange no SearchFields",
&pb.Ticket{},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: "field",
Min: math.Inf(-1),
Max: math.Inf(1),
},
},
},
},
{
"StringEquals no SearchFields",
&pb.Ticket{},
&pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: "field",
Value: "value",
},
},
},
},
{
"TagPresent no SearchFields",
&pb.Ticket{},
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: "value",
},
},
},
},
{
"double range missing field",
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"otherfield": 0,
},
},
},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: "field",
Min: math.Inf(-1),
Max: math.Inf(1),
},
},
},
},
simpleDoubleRange("valueTooLow", -1, 0, 10),
simpleDoubleRange("valueTooHigh", 11, 0, 10),
simpleDoubleRange("minIsNan", 5, math.NaN(), 10),
simpleDoubleRange("maxIsNan", 5, 0, math.NaN()),
simpleDoubleRange("minMaxAreNan", 5, math.NaN(), math.NaN()),
simpleDoubleRange("valueIsNan", math.NaN(), 0, 10),
simpleDoubleRange("valueIsNanInfRange", math.NaN(), math.Inf(-1), math.Inf(1)),
simpleDoubleRange("allAreNan", math.NaN(), math.NaN(), math.NaN()),
{
"String equals simple negative", // and case sensitivity
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"field": "value",
},
},
},
&pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: "field",
Value: "VALUE",
},
},
},
},
{
"String equals missing field",
&pb.Ticket{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
"otherfield": "othervalue",
},
},
},
&pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: "field",
Value: "value",
},
},
},
},
{
"TagPresent simple negative", // and case sensitivity
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"MYTAG",
},
},
},
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: "mytag",
},
},
},
},
{
"TagPresent multiple with one missing",
&pb.Ticket{
SearchFields: &pb.SearchFields{
Tags: []string{
"A", "B", "C",
},
},
},
&pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: "A",
},
{
Tag: "D",
},
{
Tag: "C",
},
},
},
},
{
"CreatedBefore simple negative",
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
},
},
{
"CreatedAfter simple negative",
&pb.Ticket{},
&pb.Pool{
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
},
},
{
"Created before time range negative",
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * 2)),
CreatedAfter: timestamp(now.Add(time.Hour * 1)),
},
},
{
"Created after time range negative",
&pb.Ticket{},
&pb.Pool{
CreatedBefore: timestamp(now.Add(time.Hour * -1)),
CreatedAfter: timestamp(now.Add(time.Hour * -2)),
},
},
multipleFilters(false, true, true),
multipleFilters(true, false, true),
multipleFilters(true, true, false),
}
}
func simpleDoubleRange(name string, value, min, max float64) TestCase {
return TestCase{
"double range " + name,
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"field": value,
},
},
},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: "field",
Min: min,
Max: max,
},
},
},
}
}
func multipleFilters(doubleRange, stringEquals, tagPresent bool) TestCase {
a := float64(0)
if !doubleRange {
a = 10
}
b := "hi"
if !stringEquals {
b = "bye"
}
c := "yo"
if !tagPresent {
c = "cya"
}
return TestCase{
fmt.Sprintf("multiplefilters: %v, %v, %v", doubleRange, stringEquals, tagPresent),
&pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"a": a,
},
StringArgs: map[string]string{
"b": b,
},
Tags: []string{c},
},
},
&pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: "a",
Min: -1,
Max: 1,
},
},
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: "b",
Value: "hi",
},
},
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: "yo",
},
},
},
}
}
func timestamp(t time.Time) *tspb.Timestamp {
tsp, err := ptypes.TimestampProto(t)
if err != nil {
panic(err)
}
return tsp
}

@ -0,0 +1,70 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package omerror
import (
"context"
"github.com/sirupsen/logrus"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ProtoFromErr converts an error into a grpc status. It differs from
// google.golang.org/grpc/status in that it will return an OK code on nil, and
// returns the proper codes for context cancelation and deadline exceeded.
func ProtoFromErr(err error) *spb.Status {
switch err {
case nil:
return &spb.Status{Code: int32(codes.OK)}
case context.DeadlineExceeded:
fallthrough
case context.Canceled:
return status.FromContextError(err).Proto()
default:
return status.Convert(err).Proto()
}
}
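// Minimal sketch (illustrative only) of the mapping performed by ProtoFromErr:
// nil becomes OK, context errors keep their canonical codes, and other errors
// fall back to Unknown unless they already carry a gRPC status.
func exampleProtoFromErr() []*spb.Status {
	return []*spb.Status{
		ProtoFromErr(nil),                      // Code: OK
		ProtoFromErr(context.Canceled),         // Code: Canceled
		ProtoFromErr(context.DeadlineExceeded), // Code: DeadlineExceeded
	}
}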
// WaitFunc will wait until all called functions return. WaitFunc returns the
// first error returned, otherwise it returns nil.
type WaitFunc func() error
// WaitOnErrors immediately starts a new goroutine for each function passed to it.
// It returns a WaitFunc. Any errors beyond the first are logged instead of being returned.
func WaitOnErrors(logger *logrus.Entry, fs ...func() error) WaitFunc {
errors := make(chan error, len(fs))
for _, f := range fs {
go func(f func() error) {
errors <- f()
}(f)
}
return func() error {
var first error
for range fs {
err := <-errors
if first == nil {
first = err
} else {
if err != nil {
logger.WithError(err).Warning("Multiple errors occurred in parallel execution. This error is suppressed by the error returned.")
}
}
}
return first
}
}
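// Minimal usage sketch (illustrative only): run a producer and a consumer
// concurrently and block until both return, surfacing only the first error.
// The logger and the task bodies are placeholders.
func exampleWaitOnErrors(logger *logrus.Entry) error {
	wait := WaitOnErrors(logger,
		func() error {
			// producer work would go here
			return nil
		},
		func() error {
			// consumer work would go here
			return nil
		},
	)
	return wait()
}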

@ -0,0 +1,123 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package omerror
import (
"context"
"fmt"
"testing"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestProtoFromErr(t *testing.T) {
tests := []struct {
err error
want *spb.Status
}{
{
nil,
&spb.Status{Code: int32(codes.OK)},
},
{
context.Canceled,
&spb.Status{Code: int32(codes.Canceled), Message: "context canceled"},
},
{
context.DeadlineExceeded,
&spb.Status{Code: int32(codes.DeadlineExceeded), Message: "context deadline exceeded"},
},
{
fmt.Errorf("monkeys with no hats"),
&spb.Status{Code: int32(codes.Unknown), Message: "monkeys with no hats"},
},
{
status.Errorf(codes.Internal, "even the lemurs have no hats"),
&spb.Status{Code: int32(codes.Internal), Message: "even the lemurs have no hats"},
},
}
for _, tc := range tests {
require.Equal(t, tc.want, ProtoFromErr(tc.err))
}
}
func TestWaitOnErrors(t *testing.T) {
errA := fmt.Errorf("the fish have the hats")
errB := fmt.Errorf("who gave the fish hats")
tests := []struct {
err error
fs []func() error
logged bool
log string
}{
{
nil, []func() error{}, false, "",
},
{
errA,
[]func() error{
func() error {
return errA
},
},
false, "",
},
{
nil,
[]func() error{
func() error {
return nil
},
},
false, "",
},
{
errB,
[]func() error{
func() error {
return errB
},
func() error {
return errB
},
},
true, "Multiple errors occurred in parallel execution. This error is suppressed by the error returned.",
},
}
for _, tc := range tests {
logger, hook := test.NewNullLogger()
wait := WaitOnErrors(logrus.NewEntry(logger), tc.fs...)
require.Equal(t, tc.err, wait())
if tc.logged {
require.Equal(t, 1, len(hook.Entries))
require.Equal(t, logrus.WarnLevel, hook.LastEntry().Level)
require.Equal(t, tc.log, hook.LastEntry().Message)
} else {
require.Nil(t, hook.LastEntry())
}
}
}

@ -15,11 +15,10 @@
package rpc
import (
"net/http"
"sync"
"google.golang.org/grpc"
"net/http"
"open-match.dev/open-match/internal/config"
"sync"
)
// ClientCache holds GRPC and HTTP clients based on an address.

@ -15,10 +15,9 @@
package rpc
import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"testing"
)
const (

@ -36,6 +36,7 @@ import (
"github.com/sirupsen/logrus"
"go.opencensus.io/plugin/ochttp"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer/roundrobin"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@ -310,7 +311,7 @@ func newGRPCDialOptions(enableMetrics bool, enableRPCLogging bool, enableRPCPayl
opts := []grpc.DialOption{
grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(si...)),
grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(ui...)),
grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
grpc.WithBalancerName(roundrobin.Name),
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 20 * time.Second,
Timeout: 10 * time.Second,

@ -16,20 +16,19 @@ package rpc
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"io/ioutil"
"net/http"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/telemetry"
shellTesting "open-match.dev/open-match/internal/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
certgenTesting "open-match.dev/open-match/tools/certgen/testing"
"os"
"testing"
)
func TestSecureGRPCFromConfig(t *testing.T) {

@ -67,11 +67,9 @@ func TestObtain(t *testing.T) {
listener, err := lh.Obtain()
if err != nil {
atomic.AddUint64(&errCount, 1)
}
if listener != nil {
} else if listener != nil {
atomic.AddUint64(&obtainCount, 1)
}
if err != nil && listener != nil {
} else {
t.Error("err and listener were both nil.")
}
wg.Done()

@ -16,18 +16,17 @@ package rpc
import (
"fmt"
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"io/ioutil"
"net/http"
"open-match.dev/open-match/internal/telemetry"
shellTesting "open-match.dev/open-match/internal/testing"
utilTesting "open-match.dev/open-match/internal/util/testing"
"open-match.dev/open-match/pkg/pb"
"strings"
"testing"
"time"
)
func TestStartStopServer(t *testing.T) {

85
internal/set/set.go Normal file

@ -0,0 +1,85 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package set provides helper methods for handling sets.
package set
// Intersection returns the intersection of two sets.
func Intersection(a []string, b []string) (out []string) {
hash := make(map[string]bool)
for _, v := range a {
hash[v] = true
}
for _, v := range b {
if _, found := hash[v]; found {
out = append(out, v)
}
}
return out
}
// Union returns the union of two sets.
func Union(a []string, b []string) (out []string) {
hash := make(map[string]bool)
// collect all values from input args
for _, v := range a {
hash[v] = true
}
for _, v := range b {
hash[v] = true
}
// put values into string array
for k := range hash {
out = append(out, k)
}
return out
}
// Difference returns the items in the first argument that are not in the
// second (set 'a' - set 'b')
func Difference(a []string, b []string) (out []string) {
hash := make(map[string]bool)
out = append([]string{}, a...)
for _, v := range b {
hash[v] = true
}
// Iterate through output, removing items found in b
for i := 0; i < len(out); {
if _, found := hash[out[i]]; found {
// Remove this element by copying the last element of the
// slice to this index and then slicing off the last element.
// https://stackoverflow.com/a/37335777/3113674
out[i] = out[len(out)-1]
out = out[:len(out)-1]
} else {
i++
}
}
return out
}
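// Small usage sketch (illustrative only) of the helpers above. Union returns
// its elements in arbitrary order because it is built from a map.
func exampleSetOps() (intersection, union, difference []string) {
	a := []string{"a", "b", "c"}
	b := []string{"b", "c", "d"}
	return Intersection(a, b), // ["b", "c"]
		Union(a, b), // ["a", "b", "c", "d"] in arbitrary order
		Difference(a, b) // ["a"]
}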

62
internal/set/set_test.go Normal file

@ -0,0 +1,62 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package set
import (
"fmt"
"sort"
"testing"
"github.com/stretchr/testify/assert"
)
type stringOperation func([]string, []string) []string
func TestStringOperations(t *testing.T) {
assert := assert.New(t)
a1 := []string{"a", "b", "c", "d", "e"}
a2 := []string{"b", "c", "f", "g"}
i := []string{"b", "c"}
u := []string{"a", "b", "c", "d", "e", "f", "g"}
d := []string{"a", "d", "e"}
var setTests = []struct {
in1 []string
in2 []string
expected []string
op stringOperation
}{
{a1, a1, []string{}, Difference},
{a1, a2, d, Difference},
{a1, nil, a1, Difference},
{nil, a2, []string{}, Difference},
{a1, a2, u, Union},
{nil, a2, a2, Union},
{a1, nil, a1, Union},
{a1, a2, i, Intersection},
{a1, nil, nil, Intersection},
{nil, a2, nil, Intersection},
}
for i, tt := range setTests {
t.Run(fmt.Sprintf("%#v-%d", tt.op, i), func(t *testing.T) {
actual := tt.op(tt.in1, tt.in2)
sort.Strings(tt.expected)
sort.Strings(actual)
assert.EqualValues(tt.expected, actual)
})
}
}

@ -28,8 +28,7 @@ var (
mStateStoreDeleteTicketCount = telemetry.Counter("statestore/deleteticketcount", "number of tickets deleted")
mStateStoreIndexTicketCount = telemetry.Counter("statestore/indexticketcount", "number of tickets indexed")
mStateStoreDeindexTicketCount = telemetry.Counter("statestore/deindexticketcount", "number of tickets deindexed")
mStateStoreGetTicketsCount = telemetry.Counter("statestore/getticketscount", "number of bulk ticket retrievals")
mStateStoreGetIndexedIDSetCount = telemetry.Counter("statestore/getindexedidsetcount", "number of bulk indexed id retrievals")
mStateStoreFilterTicketsCount = telemetry.Counter("statestore/filterticketcount", "number of tickets that were filtered and returned")
mStateStoreUpdateAssignmentsCount = telemetry.Counter("statestore/updateassignmentcount", "number of tickets assigned")
mStateStoreGetAssignmentsCount = telemetry.Counter("statestore/getassignmentscount", "number of ticket assigned retrieved")
mStateStoreAddTicketsToIgnoreListCount = telemetry.Counter("statestore/addticketstoignorelistcount", "number of tickets moved to ignore list")
@ -92,29 +91,30 @@ func (is *instrumentedService) DeindexTicket(ctx context.Context, id string) err
return is.s.DeindexTicket(ctx, id)
}
// GetTickets returns multiple tickets from storage. Missing tickets are
// silently ignored.
func (is *instrumentedService) GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error) {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetTickets")
// FilterTickets returns the Ticket ids and required attribute key-value pairs for the Tickets meeting the specified filtering criteria.
// map[ticket.Id]map[attributeName][attributeValue]
// {
// "testplayer1": {"ranking" : 56, "loyalty_level": 4},
// "testplayer2": {"ranking" : 50, "loyalty_level": 3},
// }
func (is *instrumentedService) FilterTickets(ctx context.Context, pool *pb.Pool, pageSize int, callback func([]*pb.Ticket) error) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.FilterTickets")
defer span.End()
defer telemetry.RecordUnitMeasurement(ctx, mStateStoreGetTicketsCount)
return is.s.GetTickets(ctx, ids)
return is.s.FilterTickets(ctx, pool, pageSize, func(t []*pb.Ticket) error {
defer telemetry.RecordNUnitMeasurement(ctx, mStateStoreFilterTicketsCount, int64(len(t)))
return callback(t)
})
}
// GetIndexedIDSet returns the ids of all tickets currently indexed.
func (is *instrumentedService) GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error) {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.GetIndexedIDSet")
defer span.End()
defer telemetry.RecordUnitMeasurement(ctx, mStateStoreGetIndexedIDSetCount)
return is.s.GetIndexedIDSet(ctx)
}
// UpdateAssignments updates the tickets specified in the request with their assignments.
func (is *instrumentedService) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
// UpdateAssignments updates the match assignments for the input ticket ids.
// This function guarantees that if any of the input ids does not exist, the state of the storage service won't be altered.
// However, since Redis does not support transaction rollbacks (see https://redis.io/topics/transactions), some of the
// assignment fields might be partially updated if this function encounters an error halfway through the execution.
func (is *instrumentedService) UpdateAssignments(ctx context.Context, ids []string, assignment *pb.Assignment) error {
ctx, span := trace.StartSpan(ctx, "statestore/instrumented.UpdateAssignments")
defer span.End()
defer telemetry.RecordUnitMeasurement(ctx, mStateStoreUpdateAssignmentsCount)
return is.s.UpdateAssignments(ctx, req)
return is.s.UpdateAssignments(ctx, ids, assignment)
}
// GetAssignments returns the assignment associated with the input ticket id

@ -36,21 +36,17 @@ type Service interface {
// DeleteTicket removes the Ticket with the specified id from state storage. This method succeeds if the Ticket does not exist.
DeleteTicket(ctx context.Context, id string) error
// IndexTicket adds the ticket to the index.
// IndexTicket indexes the Ticket id for the configured index fields.
IndexTicket(ctx context.Context, ticket *pb.Ticket) error
// DeindexTicket removes specified ticket from the index. The Ticket continues to exist.
// DeindexTicket removes the indexing for the specified Ticket. Only the indexes are removed but the Ticket continues to exist.
DeindexTicket(ctx context.Context, id string) error
// GetIndexedIDSet returns the ids of all tickets currently indexed.
GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error)
// FilterTickets returns the Ticket ids for the Tickets meeting the specified filtering criteria.
FilterTickets(ctx context.Context, pool *pb.Pool, pageSize int, callback func([]*pb.Ticket) error) error
// GetTickets returns multiple tickets from storage. Missing tickets are
// silently ignored.
GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error)
// UpdateAssignments updates the tickets specified in the request with their assignments.
UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error)
// UpdateAssignments updates the match assignments for the input ticket ids
UpdateAssignments(ctx context.Context, ids []string, assignment *pb.Assignment) error
// GetAssignments returns the assignment associated with the input ticket id
GetAssignments(ctx context.Context, id string, callback func(*pb.Assignment) error) error

@ -27,12 +27,11 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/set"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/pkg/pb"
)
const allTickets = "allTickets"
var (
redisLogger = logrus.WithFields(logrus.Fields{
"app": "openmatch",
@ -56,118 +55,64 @@ func (rb *redisBackend) Close() error {
// newRedis creates a statestore.Service backed by Redis database.
func newRedis(cfg config.View) Service {
// As per https://www.iana.org/assignments/uri-schemes/prov/redis
// redis://user:secret@localhost:6379/0?foo=bar&qux=baz
// Add redis user and password to connection url if they exist
redisURL := "redis://"
maskedURL := redisURL
passwordFile := cfg.GetString("redis.passwordPath")
if len(passwordFile) > 0 {
redisLogger.Debugf("loading Redis password from file %s", passwordFile)
passwordData, err := ioutil.ReadFile(passwordFile)
if err != nil {
redisLogger.Fatalf("cannot read Redis password from file %s, desc: %s", passwordFile, err.Error())
}
redisURL += fmt.Sprintf("%s:%s@", cfg.GetString("redis.user"), string(passwordData))
maskedURL += fmt.Sprintf("%s:%s@", cfg.GetString("redis.user"), "**********")
}
redisURL += cfg.GetString("redis.hostname") + ":" + cfg.GetString("redis.port")
maskedURL += cfg.GetString("redis.hostname") + ":" + cfg.GetString("redis.port")
redisLogger.WithField("redisURL", maskedURL).Debug("Attempting to connect to Redis")
pool := &redis.Pool{
MaxIdle: cfg.GetInt("redis.pool.maxIdle"),
MaxActive: cfg.GetInt("redis.pool.maxActive"),
IdleTimeout: cfg.GetDuration("redis.pool.idleTimeout"),
Wait: true,
TestOnBorrow: func(c redis.Conn, _ time.Time) error {
_, err := c.Do("PING")
return err
},
DialContext: func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
return redis.DialURL(redisURL, redis.DialConnectTimeout(cfg.GetDuration("redis.pool.idleTimeout")), redis.DialReadTimeout(cfg.GetDuration("redis.pool.idleTimeout")))
},
}
healthCheckPool := &redis.Pool{
MaxIdle: 3,
MaxActive: 0,
IdleTimeout: 10 * cfg.GetDuration("redis.pool.healthCheckTimeout"),
Wait: true,
DialContext: func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
return redis.DialURL(redisURL, redis.DialConnectTimeout(cfg.GetDuration("redis.pool.healthCheckTimeout")), redis.DialReadTimeout(cfg.GetDuration("redis.pool.healthCheckTimeout")))
},
}
return &redisBackend{
healthCheckPool: getHealthCheckPool(cfg),
redisPool: getRedisPool(cfg),
healthCheckPool: healthCheckPool,
redisPool: pool,
cfg: cfg,
}
}
func getHealthCheckPool(cfg config.View) *redis.Pool {
var healthCheckURL string
var maxIdle = 3
var maxActive = 0
var healthCheckTimeout = cfg.GetDuration("redis.pool.healthCheckTimeout")
if cfg.IsSet("redis.sentinelHostname") {
sentinelAddr := getSentinelAddr(cfg)
healthCheckURL = redisURLFromAddr(sentinelAddr, cfg, cfg.GetBool("redis.sentinelUsePassword"))
} else {
masterAddr := getMasterAddr(cfg)
healthCheckURL = redisURLFromAddr(masterAddr, cfg, cfg.GetBool("redis.usePassword"))
}
return &redis.Pool{
MaxIdle: maxIdle,
MaxActive: maxActive,
IdleTimeout: 10 * healthCheckTimeout,
Wait: true,
TestOnBorrow: testOnBorrow,
DialContext: func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
return redis.DialURL(healthCheckURL, redis.DialConnectTimeout(healthCheckTimeout), redis.DialReadTimeout(healthCheckTimeout))
},
}
}
func getRedisPool(cfg config.View) *redis.Pool {
var dialFunc func(context.Context) (redis.Conn, error)
maxIdle := cfg.GetInt("redis.pool.maxIdle")
maxActive := cfg.GetInt("redis.pool.maxActive")
idleTimeout := cfg.GetDuration("redis.pool.idleTimeout")
if cfg.IsSet("redis.sentinelHostname") {
sentinelPool := getSentinelPool(cfg)
dialFunc = func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
sentinelConn, err := sentinelPool.GetContext(ctx)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"error": err.Error(),
}).Error("failed to connect to redis sentinel")
return nil, status.Errorf(codes.Unavailable, "%v", err)
}
masterInfo, err := redis.Strings(sentinelConn.Do("SENTINEL", "GET-MASTER-ADDR-BY-NAME", cfg.GetString("redis.sentinelMaster")))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"error": err.Error(),
}).Error("failed to get current master from redis sentinel")
return nil, status.Errorf(codes.Unavailable, "%v", err)
}
masterURL := redisURLFromAddr(fmt.Sprintf("%s:%s", masterInfo[0], masterInfo[1]), cfg, cfg.GetBool("redis.usePassword"))
return redis.DialURL(masterURL, redis.DialConnectTimeout(idleTimeout), redis.DialReadTimeout(idleTimeout))
}
} else {
masterAddr := getMasterAddr(cfg)
masterURL := redisURLFromAddr(masterAddr, cfg, cfg.GetBool("redis.usePassword"))
dialFunc = func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
return redis.DialURL(masterURL, redis.DialConnectTimeout(idleTimeout), redis.DialReadTimeout(idleTimeout))
}
}
return &redis.Pool{
MaxIdle: maxIdle,
MaxActive: maxActive,
IdleTimeout: idleTimeout,
Wait: true,
TestOnBorrow: testOnBorrow,
DialContext: dialFunc,
}
}
func getSentinelPool(cfg config.View) *redis.Pool {
maxIdle := cfg.GetInt("redis.pool.maxIdle")
maxActive := cfg.GetInt("redis.pool.maxActive")
idleTimeout := cfg.GetDuration("redis.pool.idleTimeout")
sentinelAddr := getSentinelAddr(cfg)
sentinelURL := redisURLFromAddr(sentinelAddr, cfg, cfg.GetBool("redis.sentinelUsePassword"))
return &redis.Pool{
MaxIdle: maxIdle,
MaxActive: maxActive,
IdleTimeout: idleTimeout,
Wait: true,
TestOnBorrow: testOnBorrow,
DialContext: func(ctx context.Context) (redis.Conn, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
redisLogger.WithField("sentinelAddr", sentinelAddr).Debug("Attempting to connect to Redis Sentinel")
return redis.DialURL(sentinelURL, redis.DialConnectTimeout(idleTimeout), redis.DialReadTimeout(idleTimeout))
},
}
}
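// Illustrative sketch only: the configuration keys read by the pool helpers
// above, set through viper the same way the tests in this repository build a
// config.View. The hostname, port, and master name are placeholder values,
// and the "github.com/spf13/viper" import is assumed.
func exampleSentinelConfig() config.View {
	cfg := viper.New()
	cfg.Set("redis.sentinelHostname", "om-redis") // presence of this key enables sentinel dialing
	cfg.Set("redis.sentinelPort", "26379")
	cfg.Set("redis.sentinelMaster", "om-redis-master")
	cfg.Set("redis.sentinelUsePassword", false)
	cfg.Set("redis.pool.maxIdle", 3)
	cfg.Set("redis.pool.maxActive", 0)
	cfg.Set("redis.pool.idleTimeout", "60s")
	cfg.Set("redis.pool.healthCheckTimeout", "100ms")
	return cfg
}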
// HealthCheck indicates if the database is reachable.
func (rb *redisBackend) HealthCheck(ctx context.Context) error {
redisConn, err := rb.healthCheckPool.GetContext(ctx)
@ -189,44 +134,6 @@ func (rb *redisBackend) HealthCheck(ctx context.Context) error {
return nil
}
func testOnBorrow(c redis.Conn, lastUsed time.Time) error {
// Assume the connection is still valid if it was used within the last 15 seconds.
if time.Since(lastUsed) < 15*time.Second {
return nil
}
_, err := c.Do("PING")
return err
}
func getSentinelAddr(cfg config.View) string {
return fmt.Sprintf("%s:%s", cfg.GetString("redis.sentinelHostname"), cfg.GetString("redis.sentinelPort"))
}
func getMasterAddr(cfg config.View) string {
return fmt.Sprintf("%s:%s", cfg.GetString("redis.hostname"), cfg.GetString("redis.port"))
}
func redisURLFromAddr(addr string, cfg config.View, usePassword bool) string {
// As per https://www.iana.org/assignments/uri-schemes/prov/redis
// redis://user:secret@localhost:6379/0?foo=bar&qux=baz
// Add redis user and password to connection url if they exist
redisURL := "redis://"
if usePassword {
passwordFile := cfg.GetString("redis.passwordPath")
redisLogger.Debugf("loading Redis password from file %s", passwordFile)
passwordData, err := ioutil.ReadFile(passwordFile)
if err != nil {
redisLogger.Fatalf("cannot read Redis password from file %s, desc: %s", passwordFile, err.Error())
}
redisURL += fmt.Sprintf("%s:%s@", cfg.GetString("redis.user"), string(passwordData))
}
return redisURL + addr
}
func (rb *redisBackend) connect(ctx context.Context) (redis.Conn, error) {
startTime := time.Now()
redisConn, err := rb.redisPool.GetContext(ctx)
@ -249,6 +156,15 @@ func (rb *redisBackend) CreateTicket(ctx context.Context, ticket *pb.Ticket) err
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("MULTI")
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "MULTI",
"error": err.Error(),
}).Error("state storage operation failed")
return status.Errorf(codes.Internal, "%v", err)
}
value, err := proto.Marshal(ticket)
if err != nil {
redisLogger.WithFields(logrus.Fields{
@ -258,7 +174,7 @@ func (rb *redisBackend) CreateTicket(ctx context.Context, ticket *pb.Ticket) err
return status.Errorf(codes.Internal, "%v", err)
}
_, err = redisConn.Do("SET", ticket.GetId(), value)
err = redisConn.Send("SET", ticket.GetId(), value)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "SET",
@ -268,6 +184,32 @@ func (rb *redisBackend) CreateTicket(ctx context.Context, ticket *pb.Ticket) err
return status.Errorf(codes.Internal, "%v", err)
}
if rb.cfg.IsSet("redis.expiration") {
redisTTL := rb.cfg.GetInt("redis.expiration")
if redisTTL > 0 {
err = redisConn.Send("EXPIRE", ticket.GetId(), redisTTL)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "EXPIRE",
"key": ticket.GetId(),
"ttl": redisTTL,
"error": err.Error(),
}).Error("failed to set ticket expiration in state storage")
return status.Errorf(codes.Internal, "%v", err)
}
}
}
_, err = redisConn.Do("EXEC")
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "EXEC",
"key": ticket.GetId(),
"error": err.Error(),
}).Error("failed to create ticket in state storage")
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
@ -351,14 +293,58 @@ func (rb *redisBackend) IndexTicket(ctx context.Context, ticket *pb.Ticket) erro
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("SADD", allTickets, ticket.Id)
indexedFields := extractIndexedFields(ticket)
err = redisConn.Send("MULTI")
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "SADD",
"ticket": ticket.GetId(),
"error": err.Error(),
"key": allTickets,
}).Error("failed to add ticket to all tickets")
"cmd": "MULTI",
"error": err.Error(),
}).Error("state storage operation failed")
return status.Errorf(codes.Internal, "%v", err)
}
{
command := make([]interface{}, 0, 1+len(indexedFields))
command = append(command, indexCacheName(ticket.Id))
for index := range indexedFields {
command = append(command, index)
}
err = redisConn.Send("SADD", command...)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "SADD",
"ticket": ticket.GetId(),
"error": err.Error(),
"indices": command[1:],
}).Error("failed to set ticket's indices")
return status.Errorf(codes.Internal, "%v", err)
}
}
for k, v := range indexedFields {
// Index the DoubleArg by value.
err = redisConn.Send("ZADD", k, v, ticket.GetId())
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "ZADD",
"DoubleArg": k,
"value": v,
"ticket": ticket.GetId(),
"error": err.Error(),
}).Error("failed to index ticket DoubleArg")
return status.Errorf(codes.Internal, "%v", err)
}
}
// Run pipelined Redis commands.
_, err = redisConn.Do("EXEC")
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "EXEC",
"id": ticket.GetId(),
"error": err.Error(),
}).Error("failed to index the ticket")
return status.Errorf(codes.Internal, "%v", err)
}
@ -373,25 +359,80 @@ func (rb *redisBackend) DeindexTicket(ctx context.Context, id string) error {
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("SREM", allTickets, id)
indices, err := redis.Strings(redisConn.Do("SMEMBERS", indexCacheName(id)))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "SREM",
"key": allTickets,
"SMEMBERS": "MULTI",
"error": err.Error(),
"ticket": id,
}).Error("failed to retrieve ticket's indexed fields")
return status.Errorf(codes.Internal, "%v", err)
}
if len(indices) == 0 {
return nil
}
err = redisConn.Send("MULTI")
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "MULTI",
"error": err.Error(),
}).Error("state storage operation failed")
return status.Errorf(codes.Internal, "%v", err)
}
for _, index := range indices {
err = redisConn.Send("ZREM", index, id)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "ZREM",
"index": index,
"id": id,
"error": err.Error(),
}).Error("failed to deindex the ticket")
return status.Errorf(codes.Internal, "%v", err)
}
}
err = redisConn.Send("DEL", indexCacheName(id))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "DEL",
"id": id,
"error": err.Error(),
}).Error("failed to remove ticket from all tickets")
}).Error("failed to remove ticket's indexed fields")
return status.Errorf(codes.Internal, "%v", err)
}
_, err = redisConn.Do("EXEC")
if err != nil {
redisLogger.WithFields(logrus.Fields{
"cmd": "EXEC",
"id": id,
"error": err.Error(),
}).Error("failed to deindex the ticket")
return status.Errorf(codes.Internal, "%v", err)
}
return nil
}
// GetIndexedIDSet returns the ids of all tickets currently indexed.
func (rb *redisBackend) GetIndexedIDSet(ctx context.Context) (map[string]struct{}, error) {
redisConn, err := rb.connect(ctx)
// FilterTickets returns the Ticket ids and required DoubleArg key-value pairs for the Tickets meeting the specified filtering criteria.
// map[ticket.Id]map[DoubleArgName][DoubleArgValue]
// {
// "testplayer1": {"ranking" : 56, "loyalty_level": 4},
// "testplayer2": {"ranking" : 50, "loyalty_level": 3},
// }
func (rb *redisBackend) FilterTickets(ctx context.Context, pool *pb.Pool, pageSize int, callback func([]*pb.Ticket) error) error {
var err error
var redisConn redis.Conn
var ticketBytes [][]byte
var idsInFilter, idsInIgnoreLists []string
redisConn, err = rb.connect(ctx)
if err != nil {
return nil, err
return err
}
defer handleConnectionClose(&redisConn)
@ -401,155 +442,143 @@ func (rb *redisBackend) GetIndexedIDSet(ctx context.Context) (map[string]struct{
startTimeInt := curTime.Add(-ttl).UnixNano()
// Filter out tickets that are fetched but not assigned within ttl time (ms).
idsInIgnoreLists, err := redis.Strings(redisConn.Do("ZRANGEBYSCORE", "proposed_ticket_ids", startTimeInt, curTimeInt))
idsInIgnoreLists, err = redis.Strings(redisConn.Do("ZRANGEBYSCORE", "proposed_ticket_ids", startTimeInt, curTimeInt))
if err != nil {
redisLogger.WithError(err).Error("failed to get proposed tickets")
return nil, status.Errorf(codes.Internal, "error getting ignore list %v", err)
return status.Errorf(codes.Internal, err.Error())
}
idsIndexed, err := redis.Strings(redisConn.Do("SMEMBERS", allTickets))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"Command": "SMEMBER allTickets",
}).WithError(err).Error("Failed to lookup all tickets.")
return nil, status.Errorf(codes.Internal, "error getting all indexed ticket ids %v", err)
}
// A set of playerIds that satisfies all filters
idSet := make([]string, 0)
r := make(map[string]struct{}, len(idsIndexed))
for _, id := range idsIndexed {
r[id] = struct{}{}
}
for _, id := range idsInIgnoreLists {
delete(r, id)
}
return r, nil
}
// GetTickets returns multiple tickets from storage. Missing tickets are
// silently ignored.
func (rb *redisBackend) GetTickets(ctx context.Context, ids []string) ([]*pb.Ticket, error) {
if len(ids) == 0 {
return nil, nil
}
redisConn, err := rb.connect(ctx)
if err != nil {
return nil, err
}
defer handleConnectionClose(&redisConn)
queryParams := make([]interface{}, len(ids))
for i, id := range ids {
queryParams[i] = id
}
ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", queryParams...))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"Command": fmt.Sprintf("MGET %v", ids),
}).WithError(err).Error("Failed to lookup tickets.")
return nil, status.Errorf(codes.Internal, "%v", err)
}
r := make([]*pb.Ticket, 0, len(ids))
for i, b := range ticketBytes {
// Tickets may be deleted by the time we read them from redis.
if b != nil {
t := &pb.Ticket{}
err = proto.Unmarshal(b, t)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"key": ids[i],
}).WithError(err).Error("Failed to unmarshal ticket from redis.")
return nil, status.Errorf(codes.Internal, "%v", err)
}
r = append(r, t)
}
}
return r, nil
}
// UpdateAssignments updates the tickets specified in the request with their assignments.
func (rb *redisBackend) UpdateAssignments(ctx context.Context, req *pb.AssignTicketsRequest) (*pb.AssignTicketsResponse, error) {
resp := &pb.AssignTicketsResponse{}
if len(req.Assignments) == 0 {
return resp, nil
}
idToA := make(map[string]*pb.Assignment)
ids := make([]string, 0)
idsI := make([]interface{}, 0)
for _, a := range req.Assignments {
if a.Assignment == nil {
return nil, status.Error(codes.InvalidArgument, "AssignmentGroup.Assignment is required")
}
for _, id := range a.TicketIds {
if _, ok := idToA[id]; ok {
return nil, status.Errorf(codes.InvalidArgument, "Ticket id %s is assigned multiple times in one assign tickets call.", id)
}
idToA[id] = a.Assignment
ids = append(ids, id)
idsI = append(idsI, id)
}
}
redisConn, err := rb.connect(ctx)
if err != nil {
return nil, err
}
defer handleConnectionClose(&redisConn)
ticketBytes, err := redis.ByteSlices(redisConn.Do("MGET", idsI...))
if err != nil {
return nil, err
}
tickets := make([]*pb.Ticket, 0, len(ticketBytes))
for i, ticketByte := range ticketBytes {
// Tickets may be deleted by the time we read them from redis.
if ticketByte == nil {
resp.Failures = append(resp.Failures, &pb.AssignmentFailure{
TicketId: ids[i],
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
})
} else {
t := &pb.Ticket{}
err = proto.Unmarshal(ticketByte, t)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"key": ids[i],
}).WithError(err).Error("failed to unmarshal ticket from redis.")
return nil, status.Errorf(codes.Internal, "%v", err)
}
tickets = append(tickets, t)
}
}
cmds := make([]interface{}, 0, 2*len(tickets))
for _, ticket := range tickets {
ticket.Assignment = idToA[ticket.Id]
var ticketByte []byte
ticketByte, err = proto.Marshal(ticket)
// For each filter, do a range query to Redis on Filter.DoubleArg
for i, filter := range extractIndexFilters(pool) {
// Time Complexity O(logN + M), where N is the number of elements in the DoubleArg set
// and M is the number of entries being returned.
// TODO: discuss if we need a LIMIT for # of queries being returned
idsInFilter, err = redis.Strings(redisConn.Do("ZRANGEBYSCORE", filter.name, filter.min, filter.max))
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to marshal ticket %s", ticket.GetId())
redisLogger.WithFields(logrus.Fields{
"Command": fmt.Sprintf("ZRANGEBYSCORE %s %f %f", filter.name, filter.min, filter.max),
}).WithError(err).Error("Failed to lookup index.")
return status.Errorf(codes.Internal, "%v", err)
}
cmds = append(cmds, ticket.GetId(), ticketByte)
if i == 0 {
idSet = idsInFilter
} else {
idSet = set.Intersection(idSet, idsInFilter)
}
}
_, err = redisConn.Do("MSET", cmds...)
idSet = set.Difference(idSet, idsInIgnoreLists)
// TODO: finish reworking this after the proto changes.
for _, page := range idsToPages(idSet, pageSize) {
ticketBytes, err = redis.ByteSlices(redisConn.Do("MGET", page...))
if err != nil {
redisLogger.WithFields(logrus.Fields{
"Command": fmt.Sprintf("MGET %v", page),
}).WithError(err).Error("Failed to lookup tickets.")
return status.Errorf(codes.Internal, "%v", err)
}
tickets := make([]*pb.Ticket, 0, len(page))
for i, b := range ticketBytes {
// Tickets may be deleted by the time we read them from redis.
if b != nil {
t := &pb.Ticket{}
err = proto.Unmarshal(b, t)
if err != nil {
redisLogger.WithFields(logrus.Fields{
"key": page[i],
}).WithError(err).Error("Failed to unmarshal ticket from redis.")
return status.Errorf(codes.Internal, "%v", err)
}
tickets = append(tickets, t)
}
}
err = callback(tickets)
if err != nil {
return status.Errorf(codes.Internal, "%v", err)
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
}
return nil
}
// UpdateAssignments updates the match assignments for the input ticket ids.
// This function guarantees that if any of the input ids does not exist, the state of the storage service won't be altered.
// However, since Redis does not support transaction rollbacks (see https://redis.io/topics/transactions), some of the
// assignment fields might be partially updated if this function encounters an error halfway through the execution.
func (rb *redisBackend) UpdateAssignments(ctx context.Context, ids []string, assignment *pb.Assignment) error {
if assignment == nil {
return status.Error(codes.InvalidArgument, "assignment is nil")
}
redisConn, err := rb.connect(ctx)
if err != nil {
redisLogger.WithError(err).Errorf("failed to send ticket updates to redis %s", cmds)
return nil, err
return err
}
defer handleConnectionClose(&redisConn)
err = redisConn.Send("MULTI")
if err != nil {
return err
}
return resp, nil
// Sanity check to make sure all inputs ids are valid
tickets := []*pb.Ticket{}
for _, id := range ids {
select {
case <-ctx.Done():
return ctx.Err()
default:
var ticket *pb.Ticket
ticket, err = rb.GetTicket(ctx, id)
if err != nil {
redisLogger.WithError(err).Errorf("failed to get ticket %s from redis when updating assignments", id)
return err
}
tickets = append(tickets, ticket)
}
}
for _, ticket := range tickets {
select {
case <-ctx.Done():
return ctx.Err()
default:
assignmentCopy, ok := proto.Clone(assignment).(*pb.Assignment)
if !ok {
redisLogger.Error("failed to cast assignment object")
return status.Error(codes.Internal, "failed to cast to the assignment object")
}
ticket.Assignment = assignmentCopy
err = rb.CreateTicket(ctx, ticket)
if err != nil {
redisLogger.WithError(err).Errorf("failed to recreate ticket %#v with new assignment when updating assignments", ticket)
return err
}
}
}
// Run pipelined Redis commands.
_, err = redisConn.Do("EXEC")
if err != nil {
redisLogger.WithError(err).Error("failed to execute update assignments transaction")
return err
}
return nil
}
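// Editor's note: UpdateAssignments and the ignore-list helpers below share the
// same redigo pipelining pattern: queue commands client-side with Send("MULTI")
// followed by more Send calls, then flush and run them with Do("EXEC"). A
// minimal, self-contained sketch of that pattern (hypothetical keys and values,
// not part of this change):
func examplePipelinedWrite(conn redis.Conn) error {
	if err := conn.Send("MULTI"); err != nil {
		return err
	}
	// Send only queues the command in the client buffer; nothing runs yet.
	if err := conn.Send("SET", "example:key", "value"); err != nil {
		return err
	}
	if err := conn.Send("ZADD", "example:zset", 1, "member"); err != nil {
		return err
	}
	// EXEC flushes the queued commands and executes them together; per the
	// comment above, Redis offers no rollback if one of them fails.
	_, err := conn.Do("EXEC")
	return err
}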
// GetAssignments returns the assignment associated with the input ticket id
@ -585,26 +614,32 @@ func (rb *redisBackend) GetAssignments(ctx context.Context, id string, callback
// AddTicketsToIgnoreList appends new proposed tickets to the proposed sorted set with the current timestamp
func (rb *redisBackend) AddTicketsToIgnoreList(ctx context.Context, ids []string) error {
if len(ids) == 0 {
return nil
}
redisConn, err := rb.connect(ctx)
if err != nil {
return err
}
defer handleConnectionClose(&redisConn)
	err = redisConn.Send("MULTI")
	if err != nil {
		redisLogger.WithError(err).Error("failed to pipeline commands for AddTicketsToIgnoreList")
		return status.Error(codes.Internal, err.Error())
	}
currentTime := time.Now().UnixNano()
for _, id := range ids {
		// Queue each proposed ticket id with the current timestamp as its score.
err = redisConn.Send("ZADD", "proposed_ticket_ids", currentTime, id)
if err != nil {
redisLogger.WithError(err).Error("failed to append proposed tickets to redis")
return status.Error(codes.Internal, err.Error())
}
}
// Run pipelined Redis commands.
_, err = redisConn.Do("EXEC")
if err != nil {
redisLogger.WithError(err).Error("failed to append proposed tickets to ignore list")
redisLogger.WithError(err).Error("failed to execute pipelined commands for AddTicketsToIgnoreList")
return status.Error(codes.Internal, err.Error())
}
@ -623,21 +658,46 @@ func (rb *redisBackend) DeleteTicketsFromIgnoreList(ctx context.Context, ids []s
}
defer handleConnectionClose(&redisConn)
	err = redisConn.Send("MULTI")
	if err != nil {
		redisLogger.WithError(err).Error("failed to pipeline commands for DeleteTicketsFromIgnoreList")
		return status.Error(codes.Internal, err.Error())
	}
for _, id := range ids {
err = redisConn.Send("ZREM", "proposed_ticket_ids", id)
if err != nil {
redisLogger.WithError(err).Error("failed to delete proposed tickets from ignore list")
return status.Error(codes.Internal, err.Error())
}
}
// Run pipelined Redis commands.
_, err = redisConn.Do("EXEC")
if err != nil {
redisLogger.WithError(err).Error("failed to delete proposed tickets from ignore list")
redisLogger.WithError(err).Error("failed to execute pipelined commands for DeleteTicketsFromIgnoreList")
return status.Error(codes.Internal, err.Error())
}
return nil
}
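// Editor's sketch (hypothetical ids; the real call sites are outside this
// diff): a proposal flow would typically park tickets on the ignore list while
// a match is being evaluated and release them again if the proposal is
// rejected.
func exampleIgnoreListRoundTrip(ctx context.Context, rb *redisBackend) error {
	ids := []string{"ticket-1", "ticket-2"}
	if err := rb.AddTicketsToIgnoreList(ctx, ids); err != nil {
		return err
	}
	// ...proposal rejected; make the tickets visible to queries again.
	return rb.DeleteTicketsFromIgnoreList(ctx, ids)
}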
func idsToPages(ids []string, pageSize int) [][]interface{} {
result := make([][]interface{}, 0, len(ids)/pageSize+1)
for i := 0; i < len(ids); i += pageSize {
end := i + pageSize
if end > len(ids) {
end = len(ids)
}
page := make([]interface{}, end-i)
for i, id := range ids[i:end] {
page[i] = id
}
result = append(result, page)
}
return result
}
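// Editor's sketch (illustrative only): how idsToPages pairs with the MGET loop
// above. With pageSize 2, five ids produce three pages of at most two keys each.
func exampleIDPaging(redisConn redis.Conn) error {
	pages := idsToPages([]string{"t1", "t2", "t3", "t4", "t5"}, 2)
	// pages == [][]interface{}{{"t1", "t2"}, {"t3", "t4"}, {"t5"}}
	for _, page := range pages {
		// Each page becomes a single MGET of at most pageSize keys.
		if _, err := redis.ByteSlices(redisConn.Do("MGET", page...)); err != nil {
			return err
		}
	}
	return nil
}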
func handleConnectionClose(conn *redis.Conn) {
err := (*conn).Close()
if err != nil {

@ -0,0 +1,124 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statestore
import (
"math"
"strings"
"open-match.dev/open-match/pkg/pb"
)
// This file translates between the Open Match API's concept of fields and
// filters and a concept compatible with redis. All indices in redis are,
// for simplicity, sorted sets. The following translations are used:
// Float values map to range indices that use the float value directly, with
// filters doing direct lookups on those ranges.
// Tags indicate presence in the set. The value used is 0. Filters on tags
// look up that set.
// String values are indexed by a unique StringArg/value pair with value 0.
// Filters on strings look up that StringArg/value pair.
func extractIndexedFields(t *pb.Ticket) map[string]float64 {
result := make(map[string]float64)
for arg, value := range t.GetSearchFields().GetDoubleArgs() {
result[rangeIndexName(arg)] = value
}
for arg, value := range t.GetSearchFields().GetStringArgs() {
result[stringIndexName(arg, value)] = 0
}
for _, tag := range t.GetSearchFields().GetTags() {
result[tagIndexName(tag)] = 0
}
result[allTickets] = 0
return result
}
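// Editor's sketch (illustrative values, not part of this change): the index
// entries produced for a ticket with one double argument, one string argument,
// and one tag, following the naming scheme described above.
func exampleIndexedFields() map[string]float64 {
	t := &pb.Ticket{SearchFields: &pb.SearchFields{
		DoubleArgs: map[string]float64{"mmr": 1500},
		StringArgs: map[string]string{"mode": "ctf"},
		Tags:       []string{"beta"},
	}}
	// Result:
	//	"allTickets":   0    (catch-all index, present for every ticket)
	//	"ri$mmr":       1500 (range index scored by the double value)
	//	"si$mode$vctf": 0    (string index, one entry per arg/value pair)
	//	"ti$beta":      0    (tag index)
	return extractIndexedFields(t)
}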
type indexFilter struct {
name string
min, max float64
}
func extractIndexFilters(p *pb.Pool) []indexFilter {
filters := make([]indexFilter, 0)
for _, f := range p.DoubleRangeFilters {
filters = append(filters, indexFilter{
name: rangeIndexName(f.DoubleArg),
min: f.Min,
max: f.Max,
})
}
for _, f := range p.TagPresentFilters {
filters = append(filters, indexFilter{
name: tagIndexName(f.Tag),
min: 0,
max: 0,
})
}
for _, f := range p.StringEqualsFilters {
filters = append(filters, indexFilter{
name: stringIndexName(f.StringArg, f.Value),
min: 0,
max: 0,
})
}
if len(filters) == 0 {
filters = []indexFilter{{
name: allTickets,
min: math.Inf(-1),
max: math.Inf(1),
}}
}
return filters
}
// The following are constants and functions for determining the names of
// indices. Different index types have different prefixes to avoid any
// name collision.
const allTickets = "allTickets"
func rangeIndexName(arg string) string {
// ri stands for range index
return "ri$" + indexEscape(arg)
}
func tagIndexName(arg string) string {
// ti stands for tag index
return "ti$" + indexEscape(arg)
}
func stringIndexName(arg string, value string) string {
// si stands for string index
return "si$" + indexEscape(arg) + "$v" + indexEscape(value)
}
func indexCacheName(id string) string {
// ic stands for index cache
return "ic$" + indexEscape(id)
}
func indexEscape(s string) string {
return strings.ReplaceAll(s, "$", "$$")
}
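// Editor's sketch (illustrative): escaping "$" as "$$" keeps user-supplied
// names from colliding with the "$v" separator used by string indices, so the
// two lookups below resolve to distinct sorted sets (see TestNameCollision in
// the accompanying tests).
func exampleEscapedNames() (string, string) {
	a := stringIndexName("$v", "") // "si$$$v$v"
	b := stringIndexName("", "$v") // "si$v$$$v"
	return a, b
}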

@ -0,0 +1,176 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statestore
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
"open-match.dev/open-match/pkg/pb"
)
func TestExtractIndexedFields(t *testing.T) {
tests := []struct {
description string
searchFields *pb.SearchFields
expectedValues map[string]float64
}{
{
description: "range",
searchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{"foo": 1},
},
expectedValues: map[string]float64{
"allTickets": 0,
"ri$foo": 1,
},
},
{
description: "tag",
searchFields: &pb.SearchFields{
Tags: []string{"foo"},
},
expectedValues: map[string]float64{
"allTickets": 0,
"ti$foo": 0,
},
},
{
description: "string",
searchFields: &pb.SearchFields{
StringArgs: map[string]string{
"foo": "bar",
},
},
expectedValues: map[string]float64{
"allTickets": 0,
"si$foo$vbar": 0,
},
},
{
description: "no value",
expectedValues: map[string]float64{
"allTickets": 0,
},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
actual := extractIndexedFields(&pb.Ticket{SearchFields: test.searchFields})
assert.Equal(t, test.expectedValues, actual)
})
}
}
func TestExtractIndexFilters(t *testing.T) {
tests := []struct {
description string
pool *pb.Pool
expected []indexFilter
}{
{
description: "empty",
pool: &pb.Pool{},
expected: []indexFilter{
{
name: "allTickets",
min: math.Inf(-1),
max: math.Inf(1),
},
},
},
{
description: "range",
pool: &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: "foo",
Min: -1,
Max: 1,
},
},
},
expected: []indexFilter{
{
name: "ri$foo",
min: -1,
max: 1,
},
},
},
{
description: "tag",
pool: &pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{
{
Tag: "foo",
},
},
},
expected: []indexFilter{
{
name: "ti$foo",
min: 0,
max: 0,
},
},
},
{
description: "string equals",
pool: &pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: "foo",
Value: "bar",
},
},
},
expected: []indexFilter{
{
name: "si$foo$vbar",
min: 0,
max: 0,
},
},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
actual := extractIndexFilters(test.pool)
assert.Equal(t, test.expected, actual)
})
}
}
func TestNameCollision(t *testing.T) {
names := []string{
rangeIndexName("foo"),
tagIndexName("foo"),
stringIndexName("foo", "bar"),
indexCacheName("foo"),
stringIndexName("$v", ""),
stringIndexName("", "$v"),
}
for i := 0; i < len(names); i++ {
for j := i + 1; j < len(names); j++ {
assert.NotEqual(t, names[i], names[j])
}
}
}

@ -17,14 +17,11 @@ package statestore
import (
"context"
"errors"
"io/ioutil"
"os"
"fmt"
"testing"
"time"
"github.com/Bose/minisentinel"
miniredis "github.com/alicebob/miniredis/v2"
"github.com/gomodule/redigo/redis"
"github.com/rs/xid"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
@ -39,7 +36,7 @@ import (
func TestStatestoreSetup(t *testing.T) {
assert := assert.New(t)
cfg, closer := createRedis(t, true, "")
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
@ -49,7 +46,7 @@ func TestStatestoreSetup(t *testing.T) {
func TestTicketLifecycle(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t, true, "")
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
@ -107,7 +104,7 @@ func TestTicketLifecycle(t *testing.T) {
func TestIgnoreLists(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t, true, "")
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
@ -127,9 +124,18 @@ func TestIgnoreLists(t *testing.T) {
}
verifyTickets := func(service Service, expectLen int) {
ids, err := service.GetIndexedIDSet(ctx)
assert.Nil(err)
assert.Equal(expectLen, len(ids))
var results []*pb.Ticket
pool := &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{DoubleArg: "testindex1", Min: 0, Max: 10},
{DoubleArg: "testindex2", Min: 0, Max: 10},
},
}
service.FilterTickets(ctx, pool, 100, func(tickets []*pb.Ticket) error {
results = tickets
return nil
})
assert.Equal(expectLen, len(results))
}
// Verify all tickets are created and returned
@ -147,7 +153,7 @@ func TestIgnoreLists(t *testing.T) {
func TestDeleteTicketsFromIgnoreList(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t, true, "")
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
@ -167,9 +173,18 @@ func TestDeleteTicketsFromIgnoreList(t *testing.T) {
}
verifyTickets := func(service Service, expectLen int) {
ids, err := service.GetIndexedIDSet(ctx)
assert.Nil(err)
assert.Equal(expectLen, len(ids))
var results []*pb.Ticket
pool := &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{DoubleArg: "testindex1", Min: 0, Max: 10},
{DoubleArg: "testindex2", Min: 0, Max: 10},
},
}
service.FilterTickets(ctx, pool, 100, func(tickets []*pb.Ticket) error {
results = tickets
return nil
})
assert.Equal(expectLen, len(results))
}
// Verify all tickets are created and returned
@ -183,10 +198,86 @@ func TestDeleteTicketsFromIgnoreList(t *testing.T) {
verifyTickets(service, len(tickets))
}
func TestTicketIndexing(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
defer service.Close()
ctx := utilTesting.NewContext(t)
for i := 0; i < 10; i++ {
id := fmt.Sprintf("ticket.no.%d", i)
ticket := &pb.Ticket{
Id: id,
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
"testindex1": float64(i),
"testindex2": 0.5,
},
},
Assignment: &pb.Assignment{
Connection: "test-tbd",
},
}
err := service.CreateTicket(ctx, ticket)
assert.Nil(err)
err = service.IndexTicket(ctx, ticket)
assert.Nil(err)
}
// Remove one ticket, to test that it doesn't fall over.
err := service.DeleteTicket(ctx, "ticket.no.5")
assert.Nil(err)
// Remove ticket from index, should not show up.
err = service.DeindexTicket(ctx, "ticket.no.6")
assert.Nil(err)
found := []string{}
pool := &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{
{
DoubleArg: "testindex1",
Min: 2.5,
Max: 8.5,
},
{
DoubleArg: "testindex2",
Min: 0.49,
Max: 0.51,
},
},
}
err = service.FilterTickets(ctx, pool, 2, func(tickets []*pb.Ticket) error {
assert.True(len(tickets) <= 2)
for _, ticket := range tickets {
found = append(found, ticket.Id)
}
return nil
})
assert.Nil(err)
expected := []string{
"ticket.no.3",
"ticket.no.4",
"ticket.no.7",
"ticket.no.8",
}
assert.ElementsMatch(expected, found)
}
func TestGetAssignmentBeforeSet(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t, true, "")
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
@ -204,10 +295,36 @@ func TestGetAssignmentBeforeSet(t *testing.T) {
assert.Nil(assignmentResp)
}
func TestUpdateAssignmentFatal(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
defer service.Close()
ctx := utilTesting.NewContext(t)
var assignmentResp *pb.Assignment
// Now create a ticket in the state store service
err := service.CreateTicket(ctx, &pb.Ticket{
Id: "1",
Assignment: &pb.Assignment{Connection: "2"},
})
assert.Nil(err)
	// Try to update the assignments with the created ticket and some non-existent tickets
	err = service.UpdateAssignments(ctx, []string{"1", "2", "3"}, &pb.Assignment{Connection: "localhost"})
	// UpdateAssignments fails because some of the tickets do not exist
assert.Equal(codes.NotFound, status.Convert(err).Code())
assert.Nil(assignmentResp)
}
func TestGetAssignmentNormal(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t, true, "")
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
@ -245,16 +362,44 @@ func TestGetAssignmentNormal(t *testing.T) {
assert.Equal(returnedErr, err)
}
func TestConnect(t *testing.T) {
testConnect(t, false, "")
testConnect(t, false, "redispassword")
testConnect(t, true, "")
testConnect(t, true, "redispassword")
func TestUpdateAssignmentNormal(t *testing.T) {
// Create State Store
assert := assert.New(t)
cfg, closer := createRedis(t)
defer closer()
service := New(cfg)
assert.NotNil(service)
defer service.Close()
ctx := utilTesting.NewContext(t)
// Create a ticket without assignment
err := service.CreateTicket(ctx, &pb.Ticket{
Id: "1",
})
assert.Nil(err)
// Create a ticket already with an assignment
err = service.CreateTicket(ctx, &pb.Ticket{
Id: "3",
Assignment: &pb.Assignment{Connection: "4"},
})
assert.Nil(err)
fakeAssignment := &pb.Assignment{Connection: "Halo"}
err = service.UpdateAssignments(ctx, []string{"1", "3"}, fakeAssignment)
assert.Nil(err)
// Verify the transaction behavior of the UpdateAssignment.
ticket, err := service.GetTicket(ctx, "1")
assert.Equal(fakeAssignment.Connection, ticket.Assignment.Connection)
assert.Nil(err)
// Verify the transaction behavior of the UpdateAssignment.
ticket, err = service.GetTicket(ctx, "3")
assert.Equal(fakeAssignment.Connection, ticket.Assignment.Connection)
assert.Nil(err)
}
func testConnect(t *testing.T, withSentinel bool, withPassword string) {
func TestConnect(t *testing.T) {
assert := assert.New(t)
cfg, closer := createRedis(t, withSentinel, withPassword)
cfg, closer := createRedis(t)
defer closer()
store := New(cfg)
defer store.Close()
@ -265,29 +410,27 @@ func testConnect(t *testing.T, withSentinel bool, withPassword string) {
rb, ok := is.s.(*redisBackend)
assert.True(ok)
ctx, cancel := context.WithCancel(ctx)
cancel()
conn, err := rb.connect(ctx)
assert.NotNil(conn)
assert.Nil(err)
rply, err := redis.String(conn.Do("PING"))
assert.Nil(err)
assert.Equal("PONG", rply)
assert.NotNil(err)
assert.Nil(conn)
}
func createRedis(t *testing.T, withSentinel bool, withPassword string) (config.View, func()) {
func createRedis(t *testing.T) (config.View, func()) {
cfg := viper.New()
closerFuncs := []func(){}
mredis := miniredis.NewMiniRedis()
err := mredis.StartAddr("localhost:0")
mredis, err := miniredis.Run()
if err != nil {
t.Fatalf("failed to start miniredis, %v", err)
t.Fatalf("cannot create redis %s", err)
}
closerFuncs = append(closerFuncs, mredis.Close)
cfg.Set("redis.pool.maxIdle", 5)
cfg.Set("redis.hostname", mredis.Host())
cfg.Set("redis.port", mredis.Port())
cfg.Set("redis.pool.maxIdle", 1000)
cfg.Set("redis.pool.idleTimeout", time.Second)
cfg.Set("redis.pool.healthCheckTimeout", 100*time.Millisecond)
cfg.Set("redis.pool.maxActive", 5)
cfg.Set("redis.pool.maxActive", 1000)
cfg.Set("redis.expiration", 42000)
cfg.Set("storage.ignoreListTTL", "200ms")
cfg.Set("backoff.initialInterval", 100*time.Millisecond)
cfg.Set("backoff.randFactor", 0.5)
@ -296,43 +439,7 @@ func createRedis(t *testing.T, withSentinel bool, withPassword string) (config.V
cfg.Set("backoff.maxElapsedTime", 100*time.Millisecond)
cfg.Set(telemetry.ConfigNameEnableMetrics, true)
if withSentinel {
s := minisentinel.NewSentinel(mredis)
err = s.StartAddr("localhost:0")
if err != nil {
t.Fatalf("failed to start minisentinel, %v", err)
}
closerFuncs = append(closerFuncs, s.Close)
cfg.Set("redis.sentinelHostname", s.Host())
cfg.Set("redis.sentinelPort", s.Port())
cfg.Set("redis.sentinelMaster", s.MasterInfo().Name)
cfg.Set("redis.sentinelEnabled", true)
// TODO: enable sentinel auth test cases when the library support it.
cfg.Set("redis.sentinelUsePassword", false)
} else {
cfg.Set("redis.hostname", mredis.Host())
cfg.Set("redis.port", mredis.Port())
}
if len(withPassword) > 0 {
mredis.RequireAuth(withPassword)
tmpFile, err := ioutil.TempFile("", "password")
if err != nil {
t.Fatal("failed to create temp file for password")
}
if _, err := tmpFile.WriteString(withPassword); err != nil {
t.Fatal("failed to write pw to temp file")
}
closerFuncs = append(closerFuncs, func() { os.Remove(tmpFile.Name()) })
cfg.Set("redis.usePassword", true)
cfg.Set("redis.passwordPath", tmpFile.Name())
}
return cfg, func() {
for _, closer := range closerFuncs {
closer()
}
}
return cfg, func() { mredis.Close() }
}
// TODO: test Redis connection with Auth

@ -18,9 +18,9 @@ import "time"
const (
// PoolMaxIdle is the number of maximum idle redis connections in a redis pool
PoolMaxIdle = 5
PoolMaxIdle = 10
// PoolMaxActive is the number of maximum active redis connections in a redis pool
PoolMaxActive = 5
PoolMaxActive = 10
	// PoolIdleTimeout is the idle duration allowance of a redis connection in a redis pool
PoolIdleTimeout = 10 * time.Second
// PoolHealthCheckTimeout is the read/write timeout of a healthcheck HTTP request

@ -17,30 +17,19 @@ package testing
import (
"testing"
"github.com/Bose/minisentinel"
miniredis "github.com/alicebob/miniredis/v2"
"open-match.dev/open-match/internal/config"
"open-match.dev/open-match/internal/statestore"
)
// New creates a new in memory Redis instance with Sentinel for testing.
// New creates a new in memory Redis instance for testing.
func New(t *testing.T, cfg config.Mutable) func() {
mredis := miniredis.NewMiniRedis()
err := mredis.StartAddr("localhost:0")
mredis, err := miniredis.Run()
if err != nil {
t.Fatalf("failed to start miniredis, %v", err)
t.Fatalf("failed to create miniredis, %v", err)
}
s := minisentinel.NewSentinel(mredis)
err = s.StartAddr("localhost:0")
if err != nil {
t.Fatalf("failed to start minisentinel, %v", err)
}
cfg.Set("redis.sentinelEnabled", true)
cfg.Set("redis.sentinelHostname", s.Host())
cfg.Set("redis.sentinelPort", s.Port())
cfg.Set("redis.sentinelMaster", s.MasterInfo().Name)
cfg.Set("redis.hostname", mredis.Host())
cfg.Set("redis.port", mredis.Port())
cfg.Set("redis.pool.maxIdle", PoolMaxIdle)
cfg.Set("redis.pool.maxActive", PoolMaxActive)
cfg.Set("redis.pool.idleTimeout", PoolIdleTimeout)
@ -53,7 +42,6 @@ func New(t *testing.T, cfg config.Mutable) func() {
cfg.Set("backoff.maxElapsedTime", MaxElapsedTime)
return func() {
s.Close()
mredis.Close()
}
}

@ -22,10 +22,9 @@ import (
"bufio"
"bytes"
"fmt"
"strings"
"github.com/spf13/viper"
"open-match.dev/open-match/internal/config"
"strings"
)
const (

@ -15,12 +15,11 @@
package telemetry
import (
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"net/http"
"net/url"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
func TestConfigz(t *testing.T) {

@ -15,11 +15,10 @@
package telemetry
import (
"github.com/stretchr/testify/assert"
"net/http"
"net/url"
"testing"
"github.com/stretchr/testify/assert"
)
func TestHelp(t *testing.T) {

@ -17,12 +17,11 @@ package telemetry
import (
"context"
"fmt"
"github.com/stretchr/testify/assert"
"net/http"
"net/url"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
)
func angryHealthCheck(context.Context) error {
@ -48,7 +47,6 @@ func TestHealthCheck(t *testing.T) {
{"angryHealthCheck", []func(context.Context) error{angryHealthCheck}, "I'm angry"},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
assertHealthCheck(t, NewHealthCheck(tc.healthChecks), tc.errorString)
})

@ -15,11 +15,10 @@
package telemetry
import (
"net/http"
"net/http/pprof"
"github.com/sirupsen/logrus"
"go.opencensus.io/zpages"
"net/http"
"net/http/pprof"
"open-match.dev/open-match/internal/config"
)

@ -54,10 +54,9 @@ type OM interface {
}
// New creates a new e2e test interface.
func New(t *testing.T) OM {
func New(t *testing.T) (OM, func()) {
om := zygote.withT(t)
t.Cleanup(om.cleanup)
return om
return om, om.cleanup
}
// RunMain provides the setup and teardown for Open Match e2e tests.

@ -21,17 +21,18 @@ import (
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/app/evaluator/defaulteval"
"open-match.dev/open-match/internal/app/minimatch"
"open-match.dev/open-match/internal/rpc"
rpcTesting "open-match.dev/open-match/internal/rpc/testing"
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
"open-match.dev/open-match/internal/telemetry"
"open-match.dev/open-match/internal/testing/evaluator"
internalMmf "open-match.dev/open-match/internal/testing/mmf"
"open-match.dev/open-match/internal/util"
pb "open-match.dev/open-match/pkg/pb"
"open-match.dev/open-match/test/matchfunction/mmf"
"open-match.dev/open-match/test/evaluator/evaluate"
)
type inmemoryOM struct {
@ -173,7 +174,7 @@ func createMatchFunctionForTest(t *testing.T, c *rpcTesting.TestContext) *rpcTes
func createEvaluatorForTest(t *testing.T) *rpcTesting.TestContext {
tc := rpcTesting.MustServeInsecure(t, func(p *rpc.ServerParams) {
cfg := viper.New()
assert.Nil(t, evaluator.BindService(p, cfg, defaulteval.Evaluate))
assert.Nil(t, evaluator.BindService(p, cfg, evaluate.Evaluate))
})
return tc

@ -18,7 +18,6 @@ package testing
import (
"context"
"github.com/golang/protobuf/ptypes/empty"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/pkg/pb"
@ -33,14 +32,14 @@ type FakeFrontend struct {
// for the attributes defined as indices in the matchmaking config. If the
// attributes exist and are valid integers, they will be indexed. Creating a
// ticket adds the Ticket to the pool of Tickets considered for matchmaking.
func (s *FakeFrontend) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.Ticket, error) {
return &pb.Ticket{}, nil
func (s *FakeFrontend) CreateTicket(ctx context.Context, req *pb.CreateTicketRequest) (*pb.CreateTicketResponse, error) {
return &pb.CreateTicketResponse{}, nil
}
// DeleteTicket removes the Ticket from state storage and from corresponding
// configured indices. Deleting the ticket stops the ticket from being
// considered for future matchmaking requests.
func (s *FakeFrontend) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*empty.Empty, error) {
func (s *FakeFrontend) DeleteTicket(ctx context.Context, req *pb.DeleteTicketRequest) (*pb.DeleteTicketResponse, error) {
return nil, status.Error(codes.Unimplemented, "not implemented")
}
@ -49,8 +48,8 @@ func (s *FakeFrontend) GetTicket(ctx context.Context, req *pb.GetTicketRequest)
return nil, status.Error(codes.Unimplemented, "not implemented")
}
// WatchAssignments streams matchmaking results from Open Match for the
// GetAssignments streams matchmaking results from Open Match for the
// provided Ticket id.
func (s *FakeFrontend) WatchAssignments(req *pb.WatchAssignmentsRequest, stream pb.FrontendService_WatchAssignmentsServer) error {
func (s *FakeFrontend) GetAssignments(req *pb.GetAssignmentsRequest, stream pb.FrontendService_GetAssignmentsServer) error {
return status.Error(codes.Unimplemented, "not implemented")
}

@ -24,6 +24,7 @@ import (
grpc_validator "github.com/grpc-ecosystem/go-grpc-middleware/validator"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer/roundrobin"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
)
@ -53,7 +54,7 @@ func NewGRPCDialOptions(grpcLogger *logrus.Entry) []grpc.DialOption {
grpc.WithInsecure(),
grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(si...)),
grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(ui...)),
grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
grpc.WithBalancerName(roundrobin.Name),
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 20 * time.Second,
Timeout: 10 * time.Second,

@ -27,7 +27,7 @@ import (
func QueryPool(ctx context.Context, mml pb.QueryServiceClient, pool *pb.Pool) ([]*pb.Ticket, error) {
query, err := mml.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: pool})
if err != nil {
return nil, fmt.Errorf("error calling queryService.QueryTickets: %w", err)
return nil, fmt.Errorf("Error calling queryService.QueryTickets: %w", err)
}
var tickets []*pb.Ticket
@ -38,7 +38,7 @@ func QueryPool(ctx context.Context, mml pb.QueryServiceClient, pool *pb.Pool) ([
}
if err != nil {
return nil, fmt.Errorf("error receiving tickets from queryService.QueryTickets: %w", err)
return nil, fmt.Errorf("Error receiving tickets from queryService.QueryTickets: %w", err)
}
tickets = append(tickets, resp.Tickets...)
@ -73,7 +73,7 @@ func QueryPools(ctx context.Context, mml pb.QueryServiceClient, pools []*pb.Pool
for i := 0; i < len(pools); i++ {
select {
case <-ctx.Done():
return nil, fmt.Errorf("context canceled while querying pools: %w", ctx.Err())
return nil, fmt.Errorf("Context canceled while querying pools: %w", ctx.Err())
case r := <-results:
if r.err != nil {
return nil, r.err

@ -51,31 +51,6 @@ func (FunctionConfig_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8dab762378f455cd, []int{0, 0}
}
type AssignmentFailure_Cause int32
const (
AssignmentFailure_UNKNOWN AssignmentFailure_Cause = 0
AssignmentFailure_TICKET_NOT_FOUND AssignmentFailure_Cause = 1
)
var AssignmentFailure_Cause_name = map[int32]string{
0: "UNKNOWN",
1: "TICKET_NOT_FOUND",
}
var AssignmentFailure_Cause_value = map[string]int32{
"UNKNOWN": 0,
"TICKET_NOT_FOUND": 1,
}
func (x AssignmentFailure_Cause) String() string {
return proto.EnumName(AssignmentFailure_Cause_name, int32(x))
}
func (AssignmentFailure_Cause) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8dab762378f455cd, []int{6, 0}
}
// FunctionConfig specifies a MMF address and client type for Backend to establish connections with the MMF
type FunctionConfig struct {
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
@ -294,8 +269,7 @@ func (m *ReleaseTicketsResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_ReleaseTicketsResponse proto.InternalMessageInfo
// AssignmentGroup contains an Assignment and the Tickets to which it should be applied.
type AssignmentGroup struct {
type AssignTicketsRequest struct {
// TicketIds is a list of strings representing Open Match generated Ids which apply to an Assignment.
TicketIds []string `protobuf:"bytes,1,rep,name=ticket_ids,json=ticketIds,proto3" json:"ticket_ids,omitempty"`
// An Assignment specifies game connection related information to be associated with the TicketIds.
@ -305,106 +279,11 @@ type AssignmentGroup struct {
XXX_sizecache int32 `json:"-"`
}
func (m *AssignmentGroup) Reset() { *m = AssignmentGroup{} }
func (m *AssignmentGroup) String() string { return proto.CompactTextString(m) }
func (*AssignmentGroup) ProtoMessage() {}
func (*AssignmentGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_8dab762378f455cd, []int{5}
}
func (m *AssignmentGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AssignmentGroup.Unmarshal(m, b)
}
func (m *AssignmentGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AssignmentGroup.Marshal(b, m, deterministic)
}
func (m *AssignmentGroup) XXX_Merge(src proto.Message) {
xxx_messageInfo_AssignmentGroup.Merge(m, src)
}
func (m *AssignmentGroup) XXX_Size() int {
return xxx_messageInfo_AssignmentGroup.Size(m)
}
func (m *AssignmentGroup) XXX_DiscardUnknown() {
xxx_messageInfo_AssignmentGroup.DiscardUnknown(m)
}
var xxx_messageInfo_AssignmentGroup proto.InternalMessageInfo
func (m *AssignmentGroup) GetTicketIds() []string {
if m != nil {
return m.TicketIds
}
return nil
}
func (m *AssignmentGroup) GetAssignment() *Assignment {
if m != nil {
return m.Assignment
}
return nil
}
// AssignmentFailure contains the id of the Ticket that failed the Assignment and the failure status.
type AssignmentFailure struct {
TicketId string `protobuf:"bytes,1,opt,name=ticket_id,json=ticketId,proto3" json:"ticket_id,omitempty"`
Cause AssignmentFailure_Cause `protobuf:"varint,2,opt,name=cause,proto3,enum=openmatch.AssignmentFailure_Cause" json:"cause,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AssignmentFailure) Reset() { *m = AssignmentFailure{} }
func (m *AssignmentFailure) String() string { return proto.CompactTextString(m) }
func (*AssignmentFailure) ProtoMessage() {}
func (*AssignmentFailure) Descriptor() ([]byte, []int) {
return fileDescriptor_8dab762378f455cd, []int{6}
}
func (m *AssignmentFailure) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AssignmentFailure.Unmarshal(m, b)
}
func (m *AssignmentFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AssignmentFailure.Marshal(b, m, deterministic)
}
func (m *AssignmentFailure) XXX_Merge(src proto.Message) {
xxx_messageInfo_AssignmentFailure.Merge(m, src)
}
func (m *AssignmentFailure) XXX_Size() int {
return xxx_messageInfo_AssignmentFailure.Size(m)
}
func (m *AssignmentFailure) XXX_DiscardUnknown() {
xxx_messageInfo_AssignmentFailure.DiscardUnknown(m)
}
var xxx_messageInfo_AssignmentFailure proto.InternalMessageInfo
func (m *AssignmentFailure) GetTicketId() string {
if m != nil {
return m.TicketId
}
return ""
}
func (m *AssignmentFailure) GetCause() AssignmentFailure_Cause {
if m != nil {
return m.Cause
}
return AssignmentFailure_UNKNOWN
}
type AssignTicketsRequest struct {
// Assignments is a list of assignment groups that contain assignment and the Tickets to which they should be applied.
Assignments []*AssignmentGroup `protobuf:"bytes,1,rep,name=assignments,proto3" json:"assignments,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AssignTicketsRequest) Reset() { *m = AssignTicketsRequest{} }
func (m *AssignTicketsRequest) String() string { return proto.CompactTextString(m) }
func (*AssignTicketsRequest) ProtoMessage() {}
func (*AssignTicketsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_8dab762378f455cd, []int{7}
return fileDescriptor_8dab762378f455cd, []int{5}
}
func (m *AssignTicketsRequest) XXX_Unmarshal(b []byte) error {
@ -425,26 +304,31 @@ func (m *AssignTicketsRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_AssignTicketsRequest proto.InternalMessageInfo
func (m *AssignTicketsRequest) GetAssignments() []*AssignmentGroup {
func (m *AssignTicketsRequest) GetTicketIds() []string {
if m != nil {
return m.Assignments
return m.TicketIds
}
return nil
}
func (m *AssignTicketsRequest) GetAssignment() *Assignment {
if m != nil {
return m.Assignment
}
return nil
}
type AssignTicketsResponse struct {
// Failures is a list of all the Tickets that failed assignment along with the cause of failure.
Failures []*AssignmentFailure `protobuf:"bytes,1,rep,name=failures,proto3" json:"failures,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AssignTicketsResponse) Reset() { *m = AssignTicketsResponse{} }
func (m *AssignTicketsResponse) String() string { return proto.CompactTextString(m) }
func (*AssignTicketsResponse) ProtoMessage() {}
func (*AssignTicketsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_8dab762378f455cd, []int{8}
return fileDescriptor_8dab762378f455cd, []int{6}
}
func (m *AssignTicketsResponse) XXX_Unmarshal(b []byte) error {
@ -465,23 +349,13 @@ func (m *AssignTicketsResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_AssignTicketsResponse proto.InternalMessageInfo
func (m *AssignTicketsResponse) GetFailures() []*AssignmentFailure {
if m != nil {
return m.Failures
}
return nil
}
func init() {
proto.RegisterEnum("openmatch.FunctionConfig_Type", FunctionConfig_Type_name, FunctionConfig_Type_value)
proto.RegisterEnum("openmatch.AssignmentFailure_Cause", AssignmentFailure_Cause_name, AssignmentFailure_Cause_value)
proto.RegisterType((*FunctionConfig)(nil), "openmatch.FunctionConfig")
proto.RegisterType((*FetchMatchesRequest)(nil), "openmatch.FetchMatchesRequest")
proto.RegisterType((*FetchMatchesResponse)(nil), "openmatch.FetchMatchesResponse")
proto.RegisterType((*ReleaseTicketsRequest)(nil), "openmatch.ReleaseTicketsRequest")
proto.RegisterType((*ReleaseTicketsResponse)(nil), "openmatch.ReleaseTicketsResponse")
proto.RegisterType((*AssignmentGroup)(nil), "openmatch.AssignmentGroup")
proto.RegisterType((*AssignmentFailure)(nil), "openmatch.AssignmentFailure")
proto.RegisterType((*AssignTicketsRequest)(nil), "openmatch.AssignTicketsRequest")
proto.RegisterType((*AssignTicketsResponse)(nil), "openmatch.AssignTicketsResponse")
}
@ -489,62 +363,55 @@ func init() {
func init() { proto.RegisterFile("api/backend.proto", fileDescriptor_8dab762378f455cd) }
var fileDescriptor_8dab762378f455cd = []byte{
// 879 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xdd, 0x6e, 0x1b, 0x45,
0x14, 0xee, 0xd8, 0xce, 0x8f, 0x8f, 0xc1, 0xb8, 0x43, 0x52, 0x8c, 0x29, 0x74, 0xb3, 0x88, 0x12,
0x99, 0x7a, 0x37, 0x59, 0x02, 0xaa, 0xcc, 0x8f, 0x9a, 0xba, 0x49, 0x15, 0xb5, 0x38, 0x65, 0xe3,
0x82, 0xc4, 0x4d, 0xb4, 0x5e, 0x1f, 0xaf, 0x97, 0xd8, 0x3b, 0xc3, 0xce, 0x6c, 0x4a, 0x85, 0x84,
0x10, 0xe2, 0x02, 0x71, 0x09, 0x12, 0x17, 0x7d, 0x04, 0xee, 0x78, 0x16, 0x6e, 0x78, 0x00, 0x1e,
0x04, 0xed, 0xcc, 0xfa, 0x37, 0x4e, 0x7a, 0xe5, 0x99, 0x39, 0xdf, 0xf9, 0xbe, 0xef, 0x9c, 0x3d,
0x33, 0x86, 0xeb, 0x1e, 0x0f, 0xed, 0xae, 0xe7, 0x9f, 0x61, 0xd4, 0xb3, 0x78, 0xcc, 0x24, 0xa3,
0x45, 0xc6, 0x31, 0x1a, 0x79, 0xd2, 0x1f, 0xd4, 0x68, 0x1a, 0x1d, 0xa1, 0x10, 0x5e, 0x80, 0x42,
0x87, 0x6b, 0x37, 0x03, 0xc6, 0x82, 0x21, 0xda, 0x69, 0xc8, 0x8b, 0x22, 0x26, 0x3d, 0x19, 0xb2,
0x68, 0x1c, 0xbd, 0xa3, 0x7e, 0xfc, 0x46, 0x80, 0x51, 0x43, 0x3c, 0xf3, 0x82, 0x00, 0x63, 0x9b,
0x71, 0x85, 0xb8, 0x88, 0x36, 0x7f, 0x25, 0x50, 0x3e, 0x4c, 0x22, 0x3f, 0x3d, 0x6b, 0xb1, 0xa8,
0x1f, 0x06, 0x94, 0x42, 0x61, 0xc0, 0x84, 0xac, 0x12, 0x83, 0x6c, 0x17, 0x5d, 0xb5, 0x4e, 0xcf,
0x38, 0x8b, 0x65, 0x35, 0x67, 0x90, 0xed, 0x15, 0x57, 0xad, 0xa9, 0x03, 0x05, 0xf9, 0x9c, 0x63,
0x35, 0x6f, 0x90, 0xed, 0xb2, 0xf3, 0x8e, 0x35, 0x31, 0x6d, 0xcd, 0x13, 0x5a, 0x9d, 0xe7, 0x1c,
0x5d, 0x85, 0x35, 0x6b, 0x50, 0x48, 0x77, 0x74, 0x1d, 0x0a, 0x0f, 0xdd, 0x27, 0xad, 0xca, 0xb5,
0x74, 0xe5, 0x1e, 0x9c, 0x74, 0x2a, 0xc4, 0xfc, 0x01, 0x5e, 0x3f, 0x44, 0xe9, 0x0f, 0xbe, 0x48,
0x39, 0x50, 0xb8, 0xf8, 0x5d, 0x82, 0x42, 0xd2, 0x5d, 0x58, 0xf5, 0x15, 0x8f, 0x32, 0x54, 0x72,
0xde, 0xbc, 0x54, 0xc8, 0xcd, 0x80, 0x74, 0x17, 0xd6, 0x78, 0xcc, 0xfa, 0xe1, 0x10, 0x95, 0xe1,
0x92, 0xf3, 0xc6, 0x4c, 0x8e, 0xa2, 0x7f, 0xa2, 0xc3, 0xee, 0x18, 0x67, 0x7e, 0x0e, 0x1b, 0xf3,
0xe2, 0x82, 0xb3, 0x48, 0x20, 0xbd, 0x0d, 0x2b, 0x2a, 0x2d, 0x13, 0xaf, 0x2c, 0x12, 0xb9, 0x3a,
0x6c, 0x7e, 0x0c, 0x9b, 0x2e, 0x0e, 0xd1, 0x13, 0xd8, 0x09, 0xfd, 0x33, 0x94, 0x13, 0xfb, 0x6f,
0x03, 0x48, 0x75, 0x72, 0x1a, 0xf6, 0x44, 0x95, 0x18, 0xf9, 0xed, 0xa2, 0x5b, 0xd4, 0x27, 0x47,
0x3d, 0x61, 0x56, 0xe1, 0xc6, 0x62, 0x9e, 0x56, 0x36, 0x03, 0x78, 0x6d, 0x5f, 0x88, 0x30, 0x88,
0x46, 0x18, 0xc9, 0x87, 0x31, 0x4b, 0xf8, 0x4b, 0xb8, 0xe8, 0x47, 0x00, 0xde, 0x24, 0x23, 0xab,
0x7c, 0x73, 0xc6, 0xf0, 0x94, 0xce, 0x9d, 0x01, 0x9a, 0x7f, 0x12, 0xb8, 0x3e, 0x0d, 0x1d, 0x7a,
0xe1, 0x30, 0x89, 0x91, 0xbe, 0x05, 0xc5, 0x89, 0x56, 0x36, 0x0a, 0xeb, 0x63, 0x29, 0x7a, 0x17,
0x56, 0x7c, 0x2f, 0x11, 0xba, 0xbd, 0x65, 0xc7, 0x5c, 0x2a, 0x92, 0x31, 0x59, 0xad, 0x14, 0xe9,
0xea, 0x04, 0xb3, 0x0e, 0x2b, 0x6a, 0x4f, 0x4b, 0xb0, 0xf6, 0xb4, 0xfd, 0xa8, 0x7d, 0xfc, 0x75,
0xbb, 0x72, 0x8d, 0x6e, 0x40, 0xa5, 0x73, 0xd4, 0x7a, 0x74, 0xd0, 0x39, 0x6d, 0x1f, 0x77, 0x4e,
0x0f, 0x8f, 0x9f, 0xb6, 0x1f, 0x54, 0x88, 0xd9, 0x81, 0x0d, 0xcd, 0xb6, 0xd0, 0xd2, 0x4f, 0xa1,
0x34, 0xb5, 0xaf, 0xfb, 0x50, 0x72, 0x6a, 0x4b, 0x3d, 0xa8, 0xbe, 0xb9, 0xb3, 0x70, 0xf3, 0x4b,
0xd8, 0x5c, 0x60, 0xcd, 0x3e, 0xf5, 0x5d, 0x58, 0xef, 0x6b, 0xcb, 0x63, 0xce, 0x9b, 0x57, 0xd5,
0xe5, 0x4e, 0xd0, 0xce, 0x8b, 0x3c, 0x94, 0xef, 0xeb, 0x1b, 0x7c, 0x82, 0xf1, 0x79, 0xe8, 0x23,
0xfd, 0x11, 0x5e, 0x99, 0x9d, 0x27, 0x3a, 0x77, 0x3d, 0x2e, 0x4e, 0x79, 0xed, 0xd6, 0xa5, 0xf1,
0x6c, 0x1c, 0x3e, 0xf8, 0xf9, 0x9f, 0xff, 0xfe, 0xc8, 0xbd, 0x67, 0x1a, 0xf6, 0xf9, 0xee, 0xf8,
0xb9, 0x10, 0x5a, 0xcc, 0x1e, 0x69, 0x6c, 0xb3, 0x9f, 0x26, 0x36, 0x49, 0x7d, 0x87, 0xd0, 0x9f,
0x08, 0xbc, 0x3a, 0x57, 0x26, 0xbd, 0x75, 0xa1, 0x98, 0xf9, 0xb6, 0xd6, 0x8c, 0xcb, 0x01, 0x99,
0x87, 0x3b, 0xca, 0xc3, 0x6d, 0x73, 0x6b, 0x89, 0x07, 0x3d, 0x1b, 0xa2, 0xa9, 0x5b, 0xdd, 0x24,
0x75, 0xfa, 0x0b, 0x81, 0xf2, 0xfc, 0x6c, 0xd3, 0x59, 0x89, 0xa5, 0xd7, 0xa5, 0xb6, 0x75, 0x05,
0x22, 0x73, 0xd1, 0x50, 0x2e, 0xde, 0x37, 0xcd, 0x2b, 0x5c, 0xc4, 0x3a, 0xb5, 0x49, 0xea, 0xf7,
0x7f, 0xcb, 0xff, 0xbe, 0xff, 0x6f, 0x8e, 0xfe, 0x4d, 0x60, 0x2d, 0xfb, 0x46, 0xe6, 0x11, 0xc0,
0x31, 0xc7, 0xc8, 0x50, 0x3d, 0xa6, 0x37, 0x06, 0x52, 0x72, 0xd1, 0xb4, 0xed, 0x54, 0xb9, 0xa1,
0xa5, 0x7b, 0x78, 0x5e, 0x7b, 0x77, 0xba, 0x6f, 0xf4, 0x42, 0xe1, 0x27, 0x42, 0xdc, 0xd3, 0x2f,
0x6f, 0x90, 0x4e, 0x95, 0xb0, 0x7c, 0x36, 0xaa, 0x7f, 0x05, 0x74, 0x9f, 0x7b, 0xfe, 0x00, 0x0d,
0xc7, 0xda, 0x31, 0x1e, 0x87, 0x3e, 0xa6, 0xa3, 0x74, 0x6f, 0x4c, 0x19, 0x84, 0x72, 0x90, 0x74,
0x53, 0xa4, 0xad, 0x53, 0xfb, 0x2c, 0x0e, 0xbc, 0x11, 0x8a, 0x19, 0x31, 0xbb, 0x3b, 0x64, 0x5d,
0x7b, 0xe4, 0x09, 0x89, 0xb1, 0xfd, 0xf8, 0xa8, 0x75, 0xd0, 0x3e, 0x39, 0x70, 0xf2, 0xbb, 0xd6,
0x4e, 0x3d, 0x47, 0x72, 0x4e, 0xc5, 0xe3, 0x7c, 0x18, 0xfa, 0xea, 0xd1, 0xb6, 0xbf, 0x15, 0x2c,
0x6a, 0x5e, 0x38, 0x71, 0x3f, 0x81, 0xfc, 0xde, 0xce, 0x1e, 0xdd, 0x83, 0xba, 0x8b, 0x32, 0x89,
0x23, 0xec, 0x19, 0xcf, 0x06, 0x18, 0x19, 0x72, 0x80, 0x46, 0x8c, 0x82, 0x25, 0xb1, 0x8f, 0x46,
0x8f, 0xa1, 0x30, 0x22, 0x26, 0x0d, 0xfc, 0x3e, 0x14, 0xd2, 0xa2, 0xab, 0x50, 0x78, 0x91, 0x23,
0x6b, 0xf1, 0x67, 0x50, 0x9d, 0x36, 0xc3, 0x78, 0xc0, 0xfc, 0x24, 0x1d, 0x72, 0xc5, 0x4e, 0xb7,
0x96, 0xb7, 0xc6, 0x16, 0xa1, 0x44, 0xbb, 0xc7, 0x7c, 0x61, 0x7f, 0x63, 0x2c, 0x84, 0x66, 0xea,
0xe2, 0x67, 0x81, 0xcd, 0xbb, 0x7f, 0xe5, 0x8a, 0x29, 0xbf, 0xa2, 0xef, 0xae, 0xaa, 0x7f, 0x9d,
0x0f, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x6e, 0x5a, 0xc2, 0xf5, 0x06, 0x00, 0x00,
// 753 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0x2b, 0x35,
0x18, 0xc5, 0x49, 0x6e, 0x4b, 0x7c, 0x21, 0x0a, 0xe6, 0xf6, 0xde, 0x10, 0x01, 0xd7, 0x1d, 0x44,
0x89, 0x42, 0x33, 0x4e, 0x86, 0xc2, 0x22, 0x08, 0xd4, 0xb4, 0xb4, 0xa8, 0x52, 0x81, 0x6a, 0x5a,
0xb1, 0x60, 0x83, 0x26, 0x9e, 0x2f, 0x33, 0x43, 0x13, 0xdb, 0x8c, 0x3d, 0x2d, 0x15, 0x12, 0x42,
0x88, 0x05, 0x62, 0x09, 0xbb, 0x3e, 0x02, 0x3b, 0x9e, 0x85, 0x0d, 0x0f, 0xc0, 0x83, 0xa0, 0xf1,
0x24, 0x6d, 0x7e, 0xda, 0x4a, 0x77, 0x15, 0xe7, 0x3b, 0xe7, 0x3b, 0xe7, 0xf8, 0xb3, 0xc7, 0xf8,
0xb5, 0x40, 0x25, 0x6c, 0x18, 0xf0, 0x73, 0x10, 0xa1, 0xab, 0x52, 0x69, 0x24, 0xa9, 0x4a, 0x05,
0x62, 0x12, 0x18, 0x1e, 0x37, 0x49, 0x8e, 0x4e, 0x40, 0xeb, 0x20, 0x02, 0x5d, 0xc0, 0xcd, 0x37,
0x23, 0x29, 0xa3, 0x31, 0xb0, 0x1c, 0x0a, 0x84, 0x90, 0x26, 0x30, 0x89, 0x14, 0x33, 0x74, 0xdb,
0xfe, 0xf0, 0x4e, 0x04, 0xa2, 0xa3, 0x2f, 0x83, 0x28, 0x82, 0x94, 0x49, 0x65, 0x19, 0xab, 0x6c,
0xe7, 0x37, 0x84, 0x6b, 0x87, 0x99, 0xe0, 0x79, 0x6d, 0x5f, 0x8a, 0x51, 0x12, 0x11, 0x82, 0x2b,
0xb1, 0xd4, 0xa6, 0x81, 0x28, 0x6a, 0x55, 0x7d, 0xbb, 0xce, 0x6b, 0x4a, 0xa6, 0xa6, 0x51, 0xa2,
0xa8, 0xf5, 0xc8, 0xb7, 0x6b, 0xe2, 0xe1, 0x8a, 0xb9, 0x52, 0xd0, 0x28, 0x53, 0xd4, 0xaa, 0x79,
0x6f, 0xbb, 0x37, 0xa1, 0xdd, 0x45, 0x41, 0xf7, 0xec, 0x4a, 0x81, 0x6f, 0xb9, 0x4e, 0x13, 0x57,
0xf2, 0x7f, 0xe4, 0x65, 0x5c, 0xf9, 0xdc, 0x3f, 0xd9, 0xaf, 0xbf, 0x94, 0xaf, 0xfc, 0x83, 0xd3,
0xb3, 0x3a, 0x72, 0x7e, 0xc4, 0xaf, 0x1f, 0x82, 0xe1, 0xf1, 0x17, 0xb9, 0x06, 0x68, 0x1f, 0xbe,
0xcf, 0x40, 0x1b, 0xd2, 0xc3, 0x6b, 0xdc, 0xea, 0xd8, 0x40, 0x8f, 0xbd, 0x37, 0xee, 0x35, 0xf2,
0xa7, 0x44, 0xd2, 0xc3, 0xeb, 0x2a, 0x95, 0xa3, 0x64, 0x0c, 0x36, 0xf0, 0x63, 0xef, 0xd9, 0x5c,
0x8f, 0x95, 0x3f, 0x29, 0x60, 0x7f, 0xc6, 0x73, 0x3e, 0xc5, 0x4f, 0x16, 0xcd, 0xb5, 0x92, 0x42,
0x03, 0xd9, 0xc2, 0x8f, 0x6c, 0xdb, 0xd4, 0xbc, 0xbe, 0x2c, 0xe4, 0x17, 0xb0, 0xf3, 0x11, 0xde,
0xf0, 0x61, 0x0c, 0x81, 0x86, 0xb3, 0x84, 0x9f, 0x83, 0xb9, 0x89, 0xff, 0x16, 0xc6, 0xc6, 0x56,
0xbe, 0x4d, 0x42, 0xdd, 0x40, 0xb4, 0xdc, 0xaa, 0xfa, 0xd5, 0xa2, 0x72, 0x14, 0x6a, 0xa7, 0x81,
0x9f, 0x2e, 0xf7, 0x15, 0xce, 0xce, 0x18, 0x3f, 0x19, 0x68, 0x9d, 0x44, 0xe2, 0x85, 0x04, 0xc9,
0x87, 0x18, 0x07, 0xb6, 0x6d, 0x02, 0xc2, 0x4c, 0xb7, 0xbf, 0x31, 0x97, 0x7a, 0x70, 0x03, 0xfa,
0x73, 0x44, 0xe7, 0x19, 0xde, 0x58, 0x72, 0x2b, 0x62, 0x78, 0xd7, 0x65, 0x5c, 0xdb, 0x2b, 0x6e,
0xe7, 0x29, 0xa4, 0x17, 0x09, 0x07, 0xf2, 0x13, 0x7e, 0x65, 0x7e, 0x56, 0x64, 0xe1, 0xe8, 0x57,
0x4f, 0xb0, 0xf9, 0xfc, 0x5e, 0x7c, 0xba, 0xd5, 0xf7, 0x7f, 0xf9, 0xe7, 0xbf, 0x3f, 0x4b, 0xef,
0x3a, 0x94, 0x5d, 0xf4, 0x66, 0x9f, 0x82, 0x2e, 0xcc, 0xd8, 0xa4, 0xe0, 0xf6, 0x47, 0x79, 0x63,
0x1f, 0xb5, 0xbb, 0x88, 0xfc, 0x8c, 0xf0, 0xab, 0x0b, 0x61, 0xc9, 0xf3, 0x95, 0x0d, 0x2e, 0x0e,
0xad, 0x49, 0xef, 0x27, 0x4c, 0x33, 0x6c, 0xdb, 0x0c, 0x5b, 0xce, 0xe6, 0x1d, 0x19, 0x8a, 0xe9,
0xea, 0x7e, 0x31, 0xaf, 0x3e, 0x6a, 0x93, 0x5f, 0x11, 0xae, 0x2d, 0x9e, 0x1b, 0x99, 0xb7, 0xb8,
0xf3, 0x2a, 0x34, 0x37, 0x1f, 0x60, 0x4c, 0x53, 0x74, 0x6c, 0x8a, 0xf7, 0x1c, 0xe7, 0x81, 0x14,
0x69, 0xd1, 0xda, 0x47, 0xed, 0xbd, 0xdf, 0xcb, 0x7f, 0x0c, 0xfe, 0x2d, 0x91, 0xbf, 0x11, 0x5e,
0x9f, 0x9e, 0x91, 0x73, 0x84, 0xf1, 0x57, 0x0a, 0x04, 0xb5, 0x33, 0x26, 0x4f, 0x63, 0x63, 0x94,
0xee, 0x33, 0x96, 0x3b, 0x77, 0x0a, 0xeb, 0x10, 0x2e, 0x9a, 0xef, 0xdc, 0xfe, 0xef, 0x84, 0x89,
0xe6, 0x99, 0xd6, 0xbb, 0xc5, 0xab, 0x12, 0xa5, 0x32, 0x53, 0xda, 0xe5, 0x72, 0xd2, 0xfe, 0x1a,
0x93, 0x81, 0x0a, 0x78, 0x0c, 0xd4, 0x73, 0xbb, 0xf4, 0x38, 0xe1, 0x90, 0x7f, 0x11, 0xbb, 0x33,
0xc9, 0x28, 0x31, 0x71, 0x36, 0xcc, 0x99, 0xac, 0x68, 0x1d, 0xc9, 0x34, 0x0a, 0x26, 0xa0, 0xe7,
0xcc, 0xd8, 0x70, 0x2c, 0x87, 0x6c, 0x12, 0x68, 0x03, 0x29, 0x3b, 0x3e, 0xda, 0x3f, 0xf8, 0xf2,
0xf4, 0xc0, 0x2b, 0xf7, 0xdc, 0x6e, 0xbb, 0x84, 0x4a, 0x5e, 0x3d, 0x50, 0x6a, 0x9c, 0x70, 0xfb,
0x20, 0xb1, 0xef, 0xb4, 0x14, 0xfd, 0x95, 0x8a, 0xff, 0x31, 0x2e, 0xef, 0x74, 0x77, 0xc8, 0x0e,
0x6e, 0xfb, 0x60, 0xb2, 0x54, 0x40, 0x48, 0x2f, 0x63, 0x10, 0xd4, 0xc4, 0x40, 0x53, 0xd0, 0x32,
0x4b, 0x39, 0xd0, 0x50, 0x82, 0xa6, 0x42, 0x1a, 0x0a, 0x3f, 0x24, 0xda, 0xb8, 0x64, 0x0d, 0x57,
0xae, 0x4b, 0x68, 0x3d, 0xfd, 0x04, 0x37, 0x6e, 0x87, 0x41, 0x3f, 0x93, 0x3c, 0xcb, 0x2f, 0xbb,
0x55, 0x27, 0x9b, 0x77, 0x8f, 0x86, 0xe9, 0xc4, 0x00, 0x0b, 0x25, 0xd7, 0xec, 0x1b, 0xba, 0x04,
0xcd, 0xed, 0x4b, 0x9d, 0x47, 0x4c, 0x0d, 0xff, 0x2a, 0x55, 0x73, 0x7d, 0x2b, 0x3f, 0x5c, 0xb3,
0x2f, 0xea, 0x07, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x81, 0xb8, 0x32, 0x4c, 0xd1, 0x05, 0x00,
0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

@ -7,7 +7,6 @@ import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
@ -67,6 +66,46 @@ func (m *CreateTicketRequest) GetTicket() *Ticket {
return nil
}
type CreateTicketResponse struct {
// A Ticket object with TicketId generated.
Ticket *Ticket `protobuf:"bytes,1,opt,name=ticket,proto3" json:"ticket,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateTicketResponse) Reset() { *m = CreateTicketResponse{} }
func (m *CreateTicketResponse) String() string { return proto.CompactTextString(m) }
func (*CreateTicketResponse) ProtoMessage() {}
func (*CreateTicketResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{1}
}
func (m *CreateTicketResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateTicketResponse.Unmarshal(m, b)
}
func (m *CreateTicketResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateTicketResponse.Marshal(b, m, deterministic)
}
func (m *CreateTicketResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateTicketResponse.Merge(m, src)
}
func (m *CreateTicketResponse) XXX_Size() int {
return xxx_messageInfo_CreateTicketResponse.Size(m)
}
func (m *CreateTicketResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CreateTicketResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CreateTicketResponse proto.InternalMessageInfo
func (m *CreateTicketResponse) GetTicket() *Ticket {
if m != nil {
return m.Ticket
}
return nil
}
type DeleteTicketRequest struct {
// A TicketId of a generated Ticket to be deleted.
TicketId string `protobuf:"bytes,1,opt,name=ticket_id,json=ticketId,proto3" json:"ticket_id,omitempty"`
@ -79,7 +118,7 @@ func (m *DeleteTicketRequest) Reset() { *m = DeleteTicketRequest{} }
func (m *DeleteTicketRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteTicketRequest) ProtoMessage() {}
func (*DeleteTicketRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{1}
return fileDescriptor_06c902cf58d2ae57, []int{2}
}
func (m *DeleteTicketRequest) XXX_Unmarshal(b []byte) error {
@ -107,6 +146,37 @@ func (m *DeleteTicketRequest) GetTicketId() string {
return ""
}
type DeleteTicketResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DeleteTicketResponse) Reset() { *m = DeleteTicketResponse{} }
func (m *DeleteTicketResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteTicketResponse) ProtoMessage() {}
func (*DeleteTicketResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{3}
}
func (m *DeleteTicketResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteTicketResponse.Unmarshal(m, b)
}
func (m *DeleteTicketResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DeleteTicketResponse.Marshal(b, m, deterministic)
}
func (m *DeleteTicketResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeleteTicketResponse.Merge(m, src)
}
func (m *DeleteTicketResponse) XXX_Size() int {
return xxx_messageInfo_DeleteTicketResponse.Size(m)
}
func (m *DeleteTicketResponse) XXX_DiscardUnknown() {
xxx_messageInfo_DeleteTicketResponse.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteTicketResponse proto.InternalMessageInfo
type GetTicketRequest struct {
// A TicketId of a generated Ticket.
TicketId string `protobuf:"bytes,1,opt,name=ticket_id,json=ticketId,proto3" json:"ticket_id,omitempty"`
@ -119,7 +189,7 @@ func (m *GetTicketRequest) Reset() { *m = GetTicketRequest{} }
func (m *GetTicketRequest) String() string { return proto.CompactTextString(m) }
func (*GetTicketRequest) ProtoMessage() {}
func (*GetTicketRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{2}
return fileDescriptor_06c902cf58d2ae57, []int{4}
}
func (m *GetTicketRequest) XXX_Unmarshal(b []byte) error {
@ -147,7 +217,7 @@ func (m *GetTicketRequest) GetTicketId() string {
return ""
}
type WatchAssignmentsRequest struct {
type GetAssignmentsRequest struct {
// A TicketId of a generated Ticket to get updates on.
TicketId string `protobuf:"bytes,1,opt,name=ticket_id,json=ticketId,proto3" json:"ticket_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -155,39 +225,39 @@ type WatchAssignmentsRequest struct {
XXX_sizecache int32 `json:"-"`
}
func (m *WatchAssignmentsRequest) Reset() { *m = WatchAssignmentsRequest{} }
func (m *WatchAssignmentsRequest) String() string { return proto.CompactTextString(m) }
func (*WatchAssignmentsRequest) ProtoMessage() {}
func (*WatchAssignmentsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{3}
func (m *GetAssignmentsRequest) Reset() { *m = GetAssignmentsRequest{} }
func (m *GetAssignmentsRequest) String() string { return proto.CompactTextString(m) }
func (*GetAssignmentsRequest) ProtoMessage() {}
func (*GetAssignmentsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{5}
}
func (m *WatchAssignmentsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WatchAssignmentsRequest.Unmarshal(m, b)
func (m *GetAssignmentsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetAssignmentsRequest.Unmarshal(m, b)
}
func (m *WatchAssignmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WatchAssignmentsRequest.Marshal(b, m, deterministic)
func (m *GetAssignmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetAssignmentsRequest.Marshal(b, m, deterministic)
}
func (m *WatchAssignmentsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_WatchAssignmentsRequest.Merge(m, src)
func (m *GetAssignmentsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetAssignmentsRequest.Merge(m, src)
}
func (m *WatchAssignmentsRequest) XXX_Size() int {
return xxx_messageInfo_WatchAssignmentsRequest.Size(m)
func (m *GetAssignmentsRequest) XXX_Size() int {
return xxx_messageInfo_GetAssignmentsRequest.Size(m)
}
func (m *WatchAssignmentsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_WatchAssignmentsRequest.DiscardUnknown(m)
func (m *GetAssignmentsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetAssignmentsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_WatchAssignmentsRequest proto.InternalMessageInfo
var xxx_messageInfo_GetAssignmentsRequest proto.InternalMessageInfo
func (m *WatchAssignmentsRequest) GetTicketId() string {
func (m *GetAssignmentsRequest) GetTicketId() string {
if m != nil {
return m.TicketId
}
return ""
}
type WatchAssignmentsResponse struct {
type GetAssignmentsResponse struct {
// An updated Assignment of the requested Ticket.
Assignment *Assignment `protobuf:"bytes,1,opt,name=assignment,proto3" json:"assignment,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@ -195,32 +265,32 @@ type WatchAssignmentsResponse struct {
XXX_sizecache int32 `json:"-"`
}
func (m *WatchAssignmentsResponse) Reset() { *m = WatchAssignmentsResponse{} }
func (m *WatchAssignmentsResponse) String() string { return proto.CompactTextString(m) }
func (*WatchAssignmentsResponse) ProtoMessage() {}
func (*WatchAssignmentsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{4}
func (m *GetAssignmentsResponse) Reset() { *m = GetAssignmentsResponse{} }
func (m *GetAssignmentsResponse) String() string { return proto.CompactTextString(m) }
func (*GetAssignmentsResponse) ProtoMessage() {}
func (*GetAssignmentsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_06c902cf58d2ae57, []int{6}
}
func (m *WatchAssignmentsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WatchAssignmentsResponse.Unmarshal(m, b)
func (m *GetAssignmentsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetAssignmentsResponse.Unmarshal(m, b)
}
func (m *WatchAssignmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WatchAssignmentsResponse.Marshal(b, m, deterministic)
func (m *GetAssignmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetAssignmentsResponse.Marshal(b, m, deterministic)
}
func (m *WatchAssignmentsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_WatchAssignmentsResponse.Merge(m, src)
func (m *GetAssignmentsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetAssignmentsResponse.Merge(m, src)
}
func (m *WatchAssignmentsResponse) XXX_Size() int {
return xxx_messageInfo_WatchAssignmentsResponse.Size(m)
func (m *GetAssignmentsResponse) XXX_Size() int {
return xxx_messageInfo_GetAssignmentsResponse.Size(m)
}
func (m *WatchAssignmentsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_WatchAssignmentsResponse.DiscardUnknown(m)
func (m *GetAssignmentsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetAssignmentsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_WatchAssignmentsResponse proto.InternalMessageInfo
var xxx_messageInfo_GetAssignmentsResponse proto.InternalMessageInfo
func (m *WatchAssignmentsResponse) GetAssignment() *Assignment {
func (m *GetAssignmentsResponse) GetAssignment() *Assignment {
if m != nil {
return m.Assignment
}
@ -229,57 +299,58 @@ func (m *WatchAssignmentsResponse) GetAssignment() *Assignment {
func init() {
proto.RegisterType((*CreateTicketRequest)(nil), "openmatch.CreateTicketRequest")
proto.RegisterType((*CreateTicketResponse)(nil), "openmatch.CreateTicketResponse")
proto.RegisterType((*DeleteTicketRequest)(nil), "openmatch.DeleteTicketRequest")
proto.RegisterType((*DeleteTicketResponse)(nil), "openmatch.DeleteTicketResponse")
proto.RegisterType((*GetTicketRequest)(nil), "openmatch.GetTicketRequest")
proto.RegisterType((*WatchAssignmentsRequest)(nil), "openmatch.WatchAssignmentsRequest")
proto.RegisterType((*WatchAssignmentsResponse)(nil), "openmatch.WatchAssignmentsResponse")
proto.RegisterType((*GetAssignmentsRequest)(nil), "openmatch.GetAssignmentsRequest")
proto.RegisterType((*GetAssignmentsResponse)(nil), "openmatch.GetAssignmentsResponse")
}
func init() { proto.RegisterFile("api/frontend.proto", fileDescriptor_06c902cf58d2ae57) }
var fileDescriptor_06c902cf58d2ae57 = []byte{
// 643 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5f, 0x4f, 0x13, 0x4f,
0x14, 0xcd, 0x16, 0xc2, 0x8f, 0xce, 0x8f, 0x44, 0x1c, 0x22, 0x92, 0xd6, 0x98, 0x71, 0x49, 0x54,
0x1a, 0xbb, 0x53, 0x0a, 0xf8, 0x00, 0x31, 0x01, 0x01, 0x0d, 0x09, 0x6a, 0x2c, 0x46, 0x13, 0x5f,
0xcc, 0x76, 0xf7, 0xb2, 0x1d, 0xe9, 0xce, 0x8c, 0x7b, 0x67, 0x41, 0x63, 0x4c, 0x8c, 0xaf, 0xbe,
0xe9, 0x1b, 0x1f, 0xc1, 0x47, 0xbf, 0x8a, 0x4f, 0xbe, 0xfb, 0x41, 0xcc, 0xce, 0xf6, 0xcf, 0x5a,
0x2a, 0xc1, 0xa7, 0x66, 0xe7, 0x9e, 0x7b, 0xce, 0xbd, 0xe7, 0xdc, 0x94, 0x50, 0x5f, 0x0b, 0x7e,
0x98, 0x28, 0x69, 0x40, 0x86, 0x9e, 0x4e, 0x94, 0x51, 0xb4, 0xac, 0x34, 0xc8, 0xd8, 0x37, 0x41,
0xa7, 0x62, 0xcb, 0x31, 0x20, 0xfa, 0x11, 0x60, 0x5e, 0xae, 0x5c, 0x8b, 0x94, 0x8a, 0xba, 0xc0,
0xb3, 0x92, 0x2f, 0xa5, 0x32, 0xbe, 0x11, 0x4a, 0xf6, 0xab, 0x77, 0xec, 0x4f, 0x50, 0x8f, 0x40,
0xd6, 0xf1, 0xc4, 0x8f, 0x22, 0x48, 0xb8, 0xd2, 0x16, 0x31, 0x06, 0x5d, 0xed, 0x71, 0xd9, 0xaf,
0x76, 0x7a, 0xc8, 0x21, 0xd6, 0xe6, 0x5d, 0x5e, 0x74, 0x37, 0xc9, 0xdc, 0x76, 0x02, 0xbe, 0x81,
0x67, 0x22, 0x38, 0x02, 0xd3, 0x82, 0x37, 0x29, 0xa0, 0xa1, 0x4b, 0x64, 0xca, 0xd8, 0x87, 0x05,
0x87, 0x39, 0xb7, 0xff, 0x6f, 0x5e, 0xf6, 0x06, 0xf3, 0x7a, 0x3d, 0x64, 0x0f, 0xe0, 0x36, 0xc9,
0xdc, 0x0e, 0x74, 0x61, 0x94, 0xa1, 0x4a, 0xca, 0x39, 0xe0, 0x95, 0x08, 0x2d, 0x49, 0xb9, 0x35,
0x9d, 0x3f, 0xec, 0x85, 0x2e, 0x27, 0xb3, 0x0f, 0xc1, 0xfc, 0x43, 0xc3, 0x5d, 0x72, 0xf5, 0x45,
0x26, 0xbe, 0x85, 0x28, 0x22, 0x19, 0x83, 0x34, 0x78, 0xa1, 0xbe, 0xa7, 0x64, 0xe1, 0x6c, 0x1f,
0x6a, 0x25, 0x11, 0xe8, 0x1a, 0x21, 0xfe, 0xe0, 0xb9, 0xb7, 0xe7, 0x95, 0xc2, 0x9e, 0xc3, 0x9e,
0x56, 0x01, 0xd8, 0xfc, 0x38, 0x49, 0x2e, 0x3d, 0xe8, 0x85, 0x79, 0x00, 0xc9, 0xb1, 0x08, 0x80,
0x0a, 0x32, 0x53, 0x74, 0x91, 0x5e, 0x2f, 0xd0, 0x8c, 0xb1, 0xb7, 0x72, 0xd6, 0x4e, 0xf7, 0xe6,
0xa7, 0x1f, 0xbf, 0xbe, 0x96, 0x98, 0x5b, 0xe5, 0xc7, 0xcb, 0x83, 0x63, 0xc1, 0x9c, 0x9f, 0xe7,
0xfb, 0xe0, 0xba, 0x53, 0xa3, 0x27, 0x64, 0xa6, 0x68, 0xf7, 0x1f, 0x52, 0x63, 0x72, 0xa8, 0xcc,
0x7b, 0x79, 0xfc, 0x5e, 0x3f, 0x7e, 0x6f, 0x37, 0x8b, 0xdf, 0xe5, 0x56, 0x6f, 0xa9, 0x76, 0xeb,
0x1c, 0x3d, 0xfe, 0x7e, 0xe0, 0xec, 0x07, 0xda, 0x25, 0xe5, 0x41, 0x66, 0xb4, 0x5a, 0x50, 0x1d,
0x4d, 0x72, 0xdc, 0x76, 0x3d, 0x35, 0x7a, 0x61, 0xb5, 0x53, 0x87, 0xcc, 0x8e, 0x26, 0x47, 0xdd,
0x02, 0xf1, 0x5f, 0xce, 0xa1, 0xb2, 0x78, 0x2e, 0x26, 0x8f, 0xde, 0xdd, 0xb0, 0xe3, 0xac, 0xd1,
0x95, 0x0b, 0x8e, 0xc3, 0x87, 0xf9, 0x63, 0xc3, 0xb9, 0xff, 0x79, 0xe2, 0xcb, 0xd6, 0xcf, 0x12,
0xfd, 0xee, 0x90, 0xe9, 0xfe, 0x25, 0xb8, 0x7b, 0x84, 0x3c, 0xd1, 0x20, 0xd9, 0xa3, 0x4c, 0x93,
0xce, 0x77, 0x8c, 0xd1, 0xb8, 0xce, 0x79, 0x36, 0x4c, 0x3d, 0x9f, 0x26, 0x84, 0xe3, 0xca, 0xe2,
0xf0, 0xbb, 0x1e, 0x0a, 0x0c, 0x52, 0xc4, 0xcd, 0x3c, 0x9f, 0x28, 0x51, 0xa9, 0x46, 0x2f, 0x50,
0x71, 0xed, 0x39, 0xa1, 0x5b, 0xda, 0x0f, 0x3a, 0xc0, 0x9a, 0x5e, 0x83, 0xed, 0x8b, 0x00, 0xb2,
0x73, 0xdd, 0xec, 0x53, 0x46, 0xc2, 0x74, 0xd2, 0x76, 0x86, 0xe4, 0x79, 0xeb, 0xa1, 0x4a, 0x22,
0x3f, 0x06, 0x2c, 0x88, 0xf1, 0x76, 0x57, 0xb5, 0x79, 0xec, 0xa3, 0x81, 0x84, 0xef, 0xef, 0x6d,
0xef, 0x3e, 0x3e, 0xd8, 0x6d, 0x4e, 0x2c, 0x7b, 0x8d, 0x5a, 0xc9, 0x29, 0x35, 0x67, 0x7d, 0xad,
0xbb, 0x22, 0xb0, 0xff, 0x12, 0xfc, 0x35, 0x2a, 0xb9, 0x7e, 0xe6, 0xa5, 0xb5, 0x41, 0x26, 0x56,
0x1b, 0xab, 0x74, 0x95, 0xd4, 0x5a, 0x60, 0xd2, 0x44, 0x42, 0xc8, 0x4e, 0x3a, 0x20, 0x99, 0xe9,
0x00, 0x4b, 0x00, 0x55, 0x9a, 0x04, 0xc0, 0x42, 0x05, 0xc8, 0xa4, 0x32, 0x0c, 0xde, 0x0a, 0x34,
0x1e, 0x9d, 0x22, 0x93, 0xa7, 0x25, 0xe7, 0xbf, 0xe4, 0x1e, 0x59, 0x18, 0x9a, 0xc1, 0x76, 0x54,
0x90, 0x66, 0xd6, 0x59, 0x76, 0x7a, 0x63, 0xbc, 0x35, 0x1c, 0x85, 0x01, 0x1e, 0xaa, 0x00, 0xf9,
0x4b, 0x36, 0x52, 0x2a, 0xec, 0xa5, 0x8f, 0x22, 0xae, 0xdb, 0xdf, 0x4a, 0xe5, 0x8c, 0xdf, 0xd2,
0xb7, 0xa7, 0xec, 0x65, 0xaf, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x05, 0x01, 0x35, 0x43, 0x67,
0x05, 0x00, 0x00,
// 640 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4b, 0x4f, 0x13, 0x51,
0x14, 0xce, 0x14, 0x82, 0xf4, 0x48, 0x14, 0x2f, 0x8f, 0x90, 0x62, 0xf4, 0x52, 0x13, 0x95, 0xc6,
0xce, 0x2d, 0xa5, 0x6c, 0x20, 0x26, 0x54, 0x40, 0x42, 0x82, 0x92, 0x14, 0xe3, 0xc2, 0x8d, 0x99,
0xce, 0x1c, 0xa6, 0x23, 0xed, 0xbd, 0xe3, 0x9c, 0x3b, 0x60, 0x62, 0x4c, 0x8c, 0x5b, 0x77, 0xe8,
0xca, 0x9f, 0xe0, 0xd2, 0xbf, 0xe2, 0xca, 0xbd, 0x3f, 0xc4, 0xcc, 0xa3, 0xed, 0xf4, 0x21, 0x81,
0xd5, 0x64, 0xce, 0xf9, 0xce, 0xf7, 0x9d, 0xc7, 0x97, 0x0b, 0xcc, 0xf2, 0x3d, 0x71, 0x12, 0x28,
0xa9, 0x51, 0x3a, 0xa6, 0x1f, 0x28, 0xad, 0x58, 0x5e, 0xf9, 0x28, 0x3b, 0x96, 0xb6, 0x5b, 0x85,
0x38, 0xdd, 0x41, 0x22, 0xcb, 0x45, 0x4a, 0xd2, 0x85, 0xbb, 0xae, 0x52, 0x6e, 0x1b, 0x45, 0x94,
0xb2, 0xa4, 0x54, 0xda, 0xd2, 0x9e, 0x92, 0xdd, 0xec, 0x93, 0xf8, 0x63, 0x97, 0x5d, 0x94, 0x65,
0x3a, 0xb7, 0x5c, 0x17, 0x03, 0xa1, 0xfc, 0x18, 0x31, 0x8a, 0x2e, 0x6e, 0xc3, 0xdc, 0x4e, 0x80,
0x96, 0xc6, 0x57, 0x9e, 0x7d, 0x8a, 0xba, 0x81, 0xef, 0x43, 0x24, 0xcd, 0x56, 0x61, 0x4a, 0xc7,
0x81, 0x25, 0x83, 0x1b, 0x8f, 0x6f, 0x56, 0xef, 0x98, 0xbd, 0x96, 0xcc, 0x14, 0x99, 0x02, 0x8a,
0x75, 0x98, 0x1f, 0x64, 0x20, 0x5f, 0x49, 0xc2, 0xeb, 0x50, 0x54, 0x61, 0x6e, 0x17, 0xdb, 0x38,
0xdc, 0xc4, 0x32, 0xe4, 0x13, 0xc0, 0x5b, 0xcf, 0x89, 0x49, 0xf2, 0x8d, 0xe9, 0x24, 0x70, 0xe0,
0x14, 0x17, 0x61, 0x7e, 0xb0, 0x26, 0x91, 0x2d, 0x0a, 0x98, 0xdd, 0x47, 0x7d, 0x0d, 0xa2, 0x1a,
0x2c, 0xec, 0xa3, 0xae, 0x13, 0x79, 0xae, 0xec, 0xa0, 0xd4, 0x74, 0xa5, 0xaa, 0x23, 0x58, 0x1c,
0xae, 0x4a, 0xe7, 0xde, 0x00, 0xb0, 0x7a, 0xe1, 0x74, 0xf6, 0x85, 0xcc, 0xec, 0xfd, 0x9a, 0x46,
0x06, 0x58, 0xbd, 0x98, 0x84, 0xdb, 0xcf, 0x53, 0x1b, 0x1c, 0x63, 0x70, 0xe6, 0xd9, 0xc8, 0xce,
0x61, 0x26, 0xbb, 0x5a, 0x76, 0x2f, 0x43, 0x33, 0xe6, 0x6a, 0x85, 0xfb, 0xff, 0xcd, 0xa7, 0xcb,
0x79, 0xf8, 0xe5, 0xf7, 0xdf, 0x6f, 0x39, 0x5e, 0x5c, 0x16, 0x67, 0x6b, 0x3d, 0xd3, 0x51, 0xa2,
0x26, 0x92, 0xd9, 0x68, 0xd3, 0x28, 0xb1, 0xcf, 0x06, 0xcc, 0x64, 0xb7, 0x3b, 0xa0, 0x3c, 0xe6,
0x54, 0x03, 0xca, 0xe3, 0xcf, 0x12, 0x2b, 0xaf, 0x96, 0x1e, 0x5d, 0xa2, 0x2c, 0x3e, 0xf6, 0xf6,
0xfd, 0x89, 0xb5, 0x21, 0xdf, 0xbb, 0x23, 0x5b, 0xce, 0xd0, 0x0f, 0x5f, 0xb7, 0x30, 0x6a, 0xac,
0xae, 0x1a, 0xbb, 0xb2, 0xda, 0x77, 0x03, 0x6e, 0x0d, 0xde, 0x93, 0xf1, 0x41, 0xcd, 0x51, 0x83,
0x14, 0x56, 0x2e, 0x41, 0xa4, 0x63, 0x6f, 0xc5, 0x8d, 0x6c, 0xb0, 0xf5, 0x2b, 0x36, 0x22, 0xfa,
0x8e, 0xa0, 0x8a, 0xf1, 0xec, 0xeb, 0xc4, 0x45, 0xfd, 0x4f, 0x8e, 0xfd, 0x32, 0x60, 0xba, 0xeb,
0x8d, 0xe2, 0x01, 0xc0, 0x91, 0x8f, 0x92, 0xbf, 0x88, 0x74, 0xd9, 0x62, 0x4b, 0x6b, 0x9f, 0x36,
0x85, 0x88, 0x5a, 0x29, 0x27, 0xbd, 0x38, 0x78, 0x56, 0x78, 0xd0, 0xff, 0x2f, 0x3b, 0x1e, 0xd9,
0x21, 0xd1, 0x76, 0xf2, 0x6c, 0xb8, 0x81, 0x0a, 0x7d, 0x32, 0x6d, 0xd5, 0x29, 0xbd, 0x06, 0x56,
0xf7, 0x2d, 0xbb, 0x85, 0xbc, 0x6a, 0x56, 0xf8, 0xa1, 0x67, 0x63, 0x64, 0xe0, 0xed, 0x2e, 0xa5,
0xeb, 0xe9, 0x56, 0xd8, 0x8c, 0x90, 0x22, 0x29, 0x3d, 0x51, 0x81, 0x6b, 0x75, 0x90, 0x32, 0x62,
0xa2, 0xd9, 0x56, 0x4d, 0xd1, 0xb1, 0x48, 0x63, 0x20, 0x0e, 0x0f, 0x76, 0xf6, 0x5e, 0x1e, 0xef,
0x55, 0x27, 0xd6, 0xcc, 0x4a, 0x29, 0x67, 0xe4, 0xaa, 0xb3, 0x96, 0xef, 0xb7, 0x3d, 0x3b, 0x7e,
0x71, 0xc4, 0x3b, 0x52, 0x72, 0x73, 0x24, 0xd2, 0xd8, 0x82, 0x89, 0x5a, 0xa5, 0xc6, 0x6a, 0x50,
0x6a, 0xa0, 0x0e, 0x03, 0x89, 0x0e, 0x3f, 0x6f, 0xa1, 0xe4, 0xba, 0x85, 0x3c, 0x40, 0x52, 0x61,
0x60, 0x23, 0x77, 0x14, 0x12, 0x97, 0x4a, 0x73, 0xfc, 0xe0, 0x91, 0x36, 0xd9, 0x14, 0x4c, 0xfe,
0xc8, 0x19, 0x37, 0x82, 0xa7, 0xb0, 0xd4, 0x5f, 0x06, 0xdf, 0x55, 0x76, 0x18, 0xad, 0x2e, 0x66,
0x67, 0x2b, 0xe3, 0x57, 0x23, 0xc8, 0xd3, 0x28, 0x1c, 0x65, 0x93, 0x78, 0xc3, 0x87, 0x52, 0x99,
0xb9, 0xfc, 0x53, 0x57, 0xf8, 0xcd, 0x9f, 0xb9, 0x7c, 0xc4, 0x1f, 0xd3, 0x37, 0xa7, 0xe2, 0x27,
0x73, 0xfd, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x27, 0x68, 0xa0, 0xb3, 0x05, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -298,17 +369,17 @@ type FrontendServiceClient interface {
// A ticket is considered as ready for matchmaking once it is created.
// - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
// - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.
CreateTicket(ctx context.Context, in *CreateTicketRequest, opts ...grpc.CallOption) (*Ticket, error)
CreateTicket(ctx context.Context, in *CreateTicketRequest, opts ...grpc.CallOption) (*CreateTicketResponse, error)
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
DeleteTicket(ctx context.Context, in *DeleteTicketRequest, opts ...grpc.CallOption) (*empty.Empty, error)
DeleteTicket(ctx context.Context, in *DeleteTicketRequest, opts ...grpc.CallOption) (*DeleteTicketResponse, error)
// GetTicket gets the Ticket associated with the specified TicketId.
GetTicket(ctx context.Context, in *GetTicketRequest, opts ...grpc.CallOption) (*Ticket, error)
// WatchAssignments streams back the Assignment of the specified TicketId if it is updated.
// GetAssignments streams back the Assignment of the specified TicketId if it is updated.
// - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
WatchAssignments(ctx context.Context, in *WatchAssignmentsRequest, opts ...grpc.CallOption) (FrontendService_WatchAssignmentsClient, error)
GetAssignments(ctx context.Context, in *GetAssignmentsRequest, opts ...grpc.CallOption) (FrontendService_GetAssignmentsClient, error)
}
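For orientation, here is a minimal, hypothetical client sketch against the CreateTicketResponse/DeleteTicketResponse flavor of the interface shown on one side of this hunk; the dial target and the empty Ticket payload are placeholders, not values taken from this change.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"open-match.dev/open-match/pkg/pb"
)

func main() {
	// Placeholder address; point this at the deployed frontend service.
	conn, err := grpc.Dial("om-frontend.open-match.svc.cluster.local:50504", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fe := pb.NewFrontendServiceClient(conn)

	// In this variant CreateTicket wraps the stored Ticket in a CreateTicketResponse.
	ctResp, err := fe.CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created ticket", ctResp.GetTicket().GetId())

	// DeleteTicket likewise returns a DeleteTicketResponse rather than empty.Empty.
	if _, err := fe.DeleteTicket(context.Background(), &pb.DeleteTicketRequest{TicketId: ctResp.GetTicket().GetId()}); err != nil {
		log.Fatal(err)
	}
}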
type frontendServiceClient struct {
@ -319,8 +390,8 @@ func NewFrontendServiceClient(cc *grpc.ClientConn) FrontendServiceClient {
return &frontendServiceClient{cc}
}
func (c *frontendServiceClient) CreateTicket(ctx context.Context, in *CreateTicketRequest, opts ...grpc.CallOption) (*Ticket, error) {
out := new(Ticket)
func (c *frontendServiceClient) CreateTicket(ctx context.Context, in *CreateTicketRequest, opts ...grpc.CallOption) (*CreateTicketResponse, error) {
out := new(CreateTicketResponse)
err := c.cc.Invoke(ctx, "/openmatch.FrontendService/CreateTicket", in, out, opts...)
if err != nil {
return nil, err
@ -328,8 +399,8 @@ func (c *frontendServiceClient) CreateTicket(ctx context.Context, in *CreateTick
return out, nil
}
func (c *frontendServiceClient) DeleteTicket(ctx context.Context, in *DeleteTicketRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
func (c *frontendServiceClient) DeleteTicket(ctx context.Context, in *DeleteTicketRequest, opts ...grpc.CallOption) (*DeleteTicketResponse, error) {
out := new(DeleteTicketResponse)
err := c.cc.Invoke(ctx, "/openmatch.FrontendService/DeleteTicket", in, out, opts...)
if err != nil {
return nil, err
@ -346,12 +417,12 @@ func (c *frontendServiceClient) GetTicket(ctx context.Context, in *GetTicketRequ
return out, nil
}
func (c *frontendServiceClient) WatchAssignments(ctx context.Context, in *WatchAssignmentsRequest, opts ...grpc.CallOption) (FrontendService_WatchAssignmentsClient, error) {
stream, err := c.cc.NewStream(ctx, &_FrontendService_serviceDesc.Streams[0], "/openmatch.FrontendService/WatchAssignments", opts...)
func (c *frontendServiceClient) GetAssignments(ctx context.Context, in *GetAssignmentsRequest, opts ...grpc.CallOption) (FrontendService_GetAssignmentsClient, error) {
stream, err := c.cc.NewStream(ctx, &_FrontendService_serviceDesc.Streams[0], "/openmatch.FrontendService/GetAssignments", opts...)
if err != nil {
return nil, err
}
x := &frontendServiceWatchAssignmentsClient{stream}
x := &frontendServiceGetAssignmentsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@ -361,17 +432,17 @@ func (c *frontendServiceClient) WatchAssignments(ctx context.Context, in *WatchA
return x, nil
}
type FrontendService_WatchAssignmentsClient interface {
Recv() (*WatchAssignmentsResponse, error)
type FrontendService_GetAssignmentsClient interface {
Recv() (*GetAssignmentsResponse, error)
grpc.ClientStream
}
type frontendServiceWatchAssignmentsClient struct {
type frontendServiceGetAssignmentsClient struct {
grpc.ClientStream
}
func (x *frontendServiceWatchAssignmentsClient) Recv() (*WatchAssignmentsResponse, error) {
m := new(WatchAssignmentsResponse)
func (x *frontendServiceGetAssignmentsClient) Recv() (*GetAssignmentsResponse, error) {
m := new(GetAssignmentsResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
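A sketch of consuming that stream, assuming an already constructed pb.FrontendServiceClient and an existing ticket id; imports of context, io, and the pb package are implied.

// waitForAssignment is a hypothetical helper that drains the GetAssignments
// stream until the server reports a non-empty connection string.
func waitForAssignment(ctx context.Context, fe pb.FrontendServiceClient, ticketID string) (string, error) {
	stream, err := fe.GetAssignments(ctx, &pb.GetAssignmentsRequest{TicketId: ticketID})
	if err != nil {
		return "", err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return "", io.ErrUnexpectedEOF // stream ended before an assignment arrived
		}
		if err != nil {
			return "", err
		}
		if c := resp.GetAssignment().GetConnection(); c != "" {
			return c, nil
		}
	}
}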
@ -384,34 +455,34 @@ type FrontendServiceServer interface {
// A ticket is considered as ready for matchmaking once it is created.
// - If a TicketId exists in a Ticket request, an auto-generated TicketId will override this field.
// - If SearchFields exist in a Ticket, CreateTicket will also index these fields such that one can query the ticket with query.QueryTickets function.
CreateTicket(context.Context, *CreateTicketRequest) (*Ticket, error)
CreateTicket(context.Context, *CreateTicketRequest) (*CreateTicketResponse, error)
// DeleteTicket immediately stops Open Match from using the Ticket for matchmaking and removes the Ticket from state storage.
// The client must delete the Ticket when finished matchmaking with it.
// - If SearchFields exist in a Ticket, DeleteTicket will deindex the fields lazily.
// Users may still be able to assign/get a ticket after calling DeleteTicket on it.
DeleteTicket(context.Context, *DeleteTicketRequest) (*empty.Empty, error)
DeleteTicket(context.Context, *DeleteTicketRequest) (*DeleteTicketResponse, error)
// GetTicket gets the Ticket associated with the specified TicketId.
GetTicket(context.Context, *GetTicketRequest) (*Ticket, error)
// WatchAssignments streams back the Assignment of the specified TicketId if it is updated.
// GetAssignments streams back the Assignment of the specified TicketId if it is updated.
// - If the Assignment is not updated, GetAssignment will retry using the configured backoff strategy.
WatchAssignments(*WatchAssignmentsRequest, FrontendService_WatchAssignmentsServer) error
GetAssignments(*GetAssignmentsRequest, FrontendService_GetAssignmentsServer) error
}
// UnimplementedFrontendServiceServer can be embedded to have forward compatible implementations.
type UnimplementedFrontendServiceServer struct {
}
func (*UnimplementedFrontendServiceServer) CreateTicket(ctx context.Context, req *CreateTicketRequest) (*Ticket, error) {
func (*UnimplementedFrontendServiceServer) CreateTicket(ctx context.Context, req *CreateTicketRequest) (*CreateTicketResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateTicket not implemented")
}
func (*UnimplementedFrontendServiceServer) DeleteTicket(ctx context.Context, req *DeleteTicketRequest) (*empty.Empty, error) {
func (*UnimplementedFrontendServiceServer) DeleteTicket(ctx context.Context, req *DeleteTicketRequest) (*DeleteTicketResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteTicket not implemented")
}
func (*UnimplementedFrontendServiceServer) GetTicket(ctx context.Context, req *GetTicketRequest) (*Ticket, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetTicket not implemented")
}
func (*UnimplementedFrontendServiceServer) WatchAssignments(req *WatchAssignmentsRequest, srv FrontendService_WatchAssignmentsServer) error {
return status.Errorf(codes.Unimplemented, "method WatchAssignments not implemented")
func (*UnimplementedFrontendServiceServer) GetAssignments(req *GetAssignmentsRequest, srv FrontendService_GetAssignmentsServer) error {
return status.Errorf(codes.Unimplemented, "method GetAssignments not implemented")
}
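A hypothetical partial server implementation that embeds the stub above for forward compatibility and overrides only GetAssignments; the connection string is a made-up value.

type stubFrontend struct {
	pb.UnimplementedFrontendServiceServer
}

// GetAssignments pushes a single fixed assignment onto the generated server stream.
// A real frontend would instead watch state storage until the ticket is assigned.
func (s *stubFrontend) GetAssignments(req *pb.GetAssignmentsRequest, srv pb.FrontendService_GetAssignmentsServer) error {
	return srv.Send(&pb.GetAssignmentsResponse{
		Assignment: &pb.Assignment{Connection: "gameserver.example.com:7777"},
	})
}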
func RegisterFrontendServiceServer(s *grpc.Server, srv FrontendServiceServer) {
@ -472,24 +543,24 @@ func _FrontendService_GetTicket_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
func _FrontendService_WatchAssignments_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(WatchAssignmentsRequest)
func _FrontendService_GetAssignments_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetAssignmentsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(FrontendServiceServer).WatchAssignments(m, &frontendServiceWatchAssignmentsServer{stream})
return srv.(FrontendServiceServer).GetAssignments(m, &frontendServiceGetAssignmentsServer{stream})
}
type FrontendService_WatchAssignmentsServer interface {
Send(*WatchAssignmentsResponse) error
type FrontendService_GetAssignmentsServer interface {
Send(*GetAssignmentsResponse) error
grpc.ServerStream
}
type frontendServiceWatchAssignmentsServer struct {
type frontendServiceGetAssignmentsServer struct {
grpc.ServerStream
}
func (x *frontendServiceWatchAssignmentsServer) Send(m *WatchAssignmentsResponse) error {
func (x *frontendServiceGetAssignmentsServer) Send(m *GetAssignmentsResponse) error {
return x.ServerStream.SendMsg(m)
}
@ -512,8 +583,8 @@ var _FrontendService_serviceDesc = grpc.ServiceDesc{
},
Streams: []grpc.StreamDesc{
{
StreamName: "WatchAssignments",
Handler: _FrontendService_WatchAssignments_Handler,
StreamName: "GetAssignments",
Handler: _FrontendService_GetAssignments_Handler,
ServerStreams: true,
},
},

@ -173,8 +173,8 @@ func local_request_FrontendService_GetTicket_0(ctx context.Context, marshaler ru
}
func request_FrontendService_WatchAssignments_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (FrontendService_WatchAssignmentsClient, runtime.ServerMetadata, error) {
var protoReq WatchAssignmentsRequest
func request_FrontendService_GetAssignments_0(ctx context.Context, marshaler runtime.Marshaler, client FrontendServiceClient, req *http.Request, pathParams map[string]string) (FrontendService_GetAssignmentsClient, runtime.ServerMetadata, error) {
var protoReq GetAssignmentsRequest
var metadata runtime.ServerMetadata
var (
@ -195,7 +195,7 @@ func request_FrontendService_WatchAssignments_0(ctx context.Context, marshaler r
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "ticket_id", err)
}
stream, err := client.WatchAssignments(ctx, &protoReq)
stream, err := client.GetAssignments(ctx, &protoReq)
if err != nil {
return nil, metadata, err
}
@ -273,7 +273,7 @@ func RegisterFrontendServiceHandlerServer(ctx context.Context, mux *runtime.Serv
})
mux.Handle("GET", pattern_FrontendService_WatchAssignments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_FrontendService_GetAssignments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@ -381,7 +381,7 @@ func RegisterFrontendServiceHandlerClient(ctx context.Context, mux *runtime.Serv
})
mux.Handle("GET", pattern_FrontendService_WatchAssignments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_FrontendService_GetAssignments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
@ -390,14 +390,14 @@ func RegisterFrontendServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_FrontendService_WatchAssignments_0(rctx, inboundMarshaler, client, req, pathParams)
resp, md, err := request_FrontendService_GetAssignments_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_FrontendService_WatchAssignments_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
forward_FrontendService_GetAssignments_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
})
@ -411,7 +411,7 @@ var (
pattern_FrontendService_GetTicket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "frontendservice", "tickets", "ticket_id"}, "", runtime.AssumeColonVerbOpt(true)))
pattern_FrontendService_WatchAssignments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "frontendservice", "tickets", "ticket_id", "assignments"}, "", runtime.AssumeColonVerbOpt(true)))
pattern_FrontendService_GetAssignments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "frontendservice", "tickets", "ticket_id", "assignments"}, "", runtime.AssumeColonVerbOpt(true)))
)
var (
@ -421,5 +421,5 @@ var (
forward_FrontendService_GetTicket_0 = runtime.ForwardResponseMessage
forward_FrontendService_WatchAssignments_0 = runtime.ForwardResponseStream
forward_FrontendService_GetAssignments_0 = runtime.ForwardResponseStream
)
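The pattern above maps the assignment watch onto GET /v1/frontendservice/tickets/{ticket_id}/assignments. A hedged fragment for calling it over plain HTTP from inside some function; the host, port, and ticket id are placeholders, imports of bufio, net/http, and log are implied, and it assumes grpc-gateway's usual behavior of emitting one JSON object per line for server-streaming methods.

// Placeholder host/port and ticket id; the loop prints each streamed
// {"result": {...}} line as grpc-gateway forwards it.
resp, err := http.Get("http://om-frontend.example.com:51504/v1/frontendservice/tickets/ticket-123/assignments")
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
sc := bufio.NewScanner(resp.Body)
for sc.Scan() {
	log.Println(sc.Text())
}
if err := sc.Err(); err != nil {
	log.Fatal(err)
}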

@ -7,7 +7,6 @@ import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/rpc/status"
math "math"
)
@ -40,13 +39,10 @@ type Ticket struct {
// Customized information not inspected by Open Match, to be used by the match
// making function, evaluator, and components making calls to Open Match.
// Optional, depending on the requirements of the connected systems.
Extensions map[string]*any.Any `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Create time represents the time at which this Ticket was created. It is
// populated by Open Match at the time of Ticket creation.
CreateTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Extensions map[string]*any.Any `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Ticket) Reset() { *m = Ticket{} }
@ -102,13 +98,6 @@ func (m *Ticket) GetExtensions() map[string]*any.Any {
return nil
}
func (m *Ticket) GetCreateTime() *timestamp.Timestamp {
if m != nil {
return m.CreateTime
}
return nil
}
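For context, a caller-side sketch of building a Ticket with the fields this message exposes; the values are invented, and it assumes the ptypes, ptypes/any, and ptypes/wrappers packages from github.com/golang/protobuf alongside the generated pb package.

// exampleTicket builds a hypothetical ticket with indexed search fields and
// one opaque extension; MarshalAny packs an arbitrary proto message into any.Any.
func exampleTicket() (*pb.Ticket, error) {
	ext, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "custom-payload"})
	if err != nil {
		return nil, err
	}
	return &pb.Ticket{
		SearchFields: &pb.SearchFields{
			Tags:       []string{"beta"},
			DoubleArgs: map[string]float64{"level": 7},
		},
		Extensions: map[string]*any.Any{"notes": ext},
	}, nil
}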
// Search fields are the fields which Open Match is aware of, and can be used
// when specifying filters.
type SearchFields struct {
@ -397,23 +386,17 @@ func (m *TagPresentFilter) GetTag() string {
return ""
}
// Pool specifies a set of criteria that are used to select a subset of Tickets
// that meet all the criteria.
type Pool struct {
// A developer-chosen human-readable name for this Pool.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Set of Filters indicating the filtering criteria. Selected tickets must
// Set of Filters indicating the filtering criteria. Selected players must
// match every Filter.
DoubleRangeFilters []*DoubleRangeFilter `protobuf:"bytes,2,rep,name=double_range_filters,json=doubleRangeFilters,proto3" json:"double_range_filters,omitempty"`
StringEqualsFilters []*StringEqualsFilter `protobuf:"bytes,4,rep,name=string_equals_filters,json=stringEqualsFilters,proto3" json:"string_equals_filters,omitempty"`
TagPresentFilters []*TagPresentFilter `protobuf:"bytes,5,rep,name=tag_present_filters,json=tagPresentFilters,proto3" json:"tag_present_filters,omitempty"`
// If specified, only Tickets created before the specified time are selected.
CreatedBefore *timestamp.Timestamp `protobuf:"bytes,6,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"`
// If specified, only Tickets created after the specified time are selected.
CreatedAfter *timestamp.Timestamp `protobuf:"bytes,7,opt,name=created_after,json=createdAfter,proto3" json:"created_after,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
DoubleRangeFilters []*DoubleRangeFilter `protobuf:"bytes,2,rep,name=double_range_filters,json=doubleRangeFilters,proto3" json:"double_range_filters,omitempty"`
StringEqualsFilters []*StringEqualsFilter `protobuf:"bytes,4,rep,name=string_equals_filters,json=stringEqualsFilters,proto3" json:"string_equals_filters,omitempty"`
TagPresentFilters []*TagPresentFilter `protobuf:"bytes,5,rep,name=tag_present_filters,json=tagPresentFilters,proto3" json:"tag_present_filters,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Pool) Reset() { *m = Pool{} }
@ -469,20 +452,6 @@ func (m *Pool) GetTagPresentFilters() []*TagPresentFilter {
return nil
}
func (m *Pool) GetCreatedBefore() *timestamp.Timestamp {
if m != nil {
return m.CreatedBefore
}
return nil
}
func (m *Pool) GetCreatedAfter() *timestamp.Timestamp {
if m != nil {
return m.CreatedAfter
}
return nil
}
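A caller-side sketch of composing a Pool from the three filter types above; the names and ranges are placeholders and, to stay valid on both sides of this diff, the created_before/created_after fields are omitted.

// beginnerPool selects tickets tagged "beta", in a fixed region, with a low level.
var beginnerPool = &pb.Pool{
	Name: "map1-beginner",
	DoubleRangeFilters: []*pb.DoubleRangeFilter{
		{DoubleArg: "level", Min: 0, Max: 10},
	},
	StringEqualsFilters: []*pb.StringEqualsFilter{
		{StringArg: "region", Value: "europe-west1"},
	},
	TagPresentFilters: []*pb.TagPresentFilter{
		{Tag: "beta"},
	},
}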
// A MatchProfile is Open Match's representation of a Match specification. It is
// used to indicate the criteria for selecting players for a match. A
// MatchProfile is the input to the API to get matches and is passed to the
@ -652,57 +621,52 @@ func init() {
func init() { proto.RegisterFile("api/messages.proto", fileDescriptor_cb9fb1f207fd5b8c) }
var fileDescriptor_cb9fb1f207fd5b8c = []byte{
// 830 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x55, 0xdd, 0x6e, 0xe3, 0x44,
0x14, 0x56, 0x6c, 0x27, 0x69, 0x4e, 0xd2, 0xd6, 0x9d, 0xed, 0x6a, 0xbd, 0x81, 0x85, 0x60, 0xa8,
0xa8, 0x40, 0x38, 0x52, 0x11, 0x12, 0xe2, 0x47, 0x90, 0x15, 0x2d, 0x6c, 0x11, 0x50, 0xdc, 0x8a,
0x0b, 0x6e, 0xac, 0x49, 0x3c, 0xf1, 0x5a, 0xb5, 0xc7, 0xc6, 0x33, 0x59, 0x6d, 0xde, 0x83, 0xa7,
0xe0, 0x9a, 0x6b, 0x9e, 0x82, 0x87, 0xe0, 0x9e, 0x17, 0x40, 0xf3, 0x63, 0x67, 0xd6, 0x09, 0xbb,
0xdc, 0xa0, 0xde, 0xcd, 0x9c, 0x9f, 0x6f, 0xce, 0xf9, 0xce, 0xe7, 0x63, 0x40, 0xb8, 0x4c, 0xa7,
0x39, 0x61, 0x0c, 0x27, 0x84, 0x05, 0x65, 0x55, 0xf0, 0x02, 0x0d, 0x8a, 0x92, 0xd0, 0x1c, 0xf3,
0xc5, 0xd3, 0xf1, 0x83, 0xa4, 0x28, 0x92, 0x8c, 0x4c, 0xab, 0x72, 0x31, 0x65, 0x1c, 0xf3, 0x95,
0x8e, 0x19, 0x3f, 0xd4, 0x0e, 0x79, 0x9b, 0xaf, 0x96, 0x53, 0x4c, 0xd7, 0xda, 0xf5, 0x66, 0xdb,
0xc5, 0xd3, 0x9c, 0x30, 0x8e, 0xf3, 0x52, 0x05, 0xf8, 0x7f, 0x59, 0xd0, 0xbb, 0x49, 0x17, 0xb7,
0x84, 0xa3, 0x03, 0xb0, 0xd2, 0xd8, 0xeb, 0x4c, 0x3a, 0xa7, 0x83, 0xd0, 0x4a, 0x63, 0xf4, 0x11,
0x00, 0x66, 0x2c, 0x4d, 0x68, 0x4e, 0x28, 0xf7, 0xec, 0x49, 0xe7, 0x74, 0x78, 0x76, 0x3f, 0x68,
0xea, 0x09, 0x66, 0x8d, 0x33, 0x34, 0x02, 0xd1, 0x67, 0xb0, 0xcf, 0x08, 0xae, 0x16, 0x4f, 0xa3,
0x65, 0x4a, 0xb2, 0x98, 0x79, 0x8e, 0xcc, 0x7c, 0x60, 0x64, 0x5e, 0x4b, 0xff, 0x85, 0x74, 0x87,
0x23, 0x66, 0xdc, 0xd0, 0x0c, 0x80, 0x3c, 0xe7, 0x84, 0xb2, 0xb4, 0xa0, 0xcc, 0xeb, 0x4e, 0xec,
0xd3, 0xe1, 0xd9, 0x5b, 0x46, 0xaa, 0xaa, 0x35, 0x38, 0x6f, 0x62, 0xce, 0x29, 0xaf, 0xd6, 0xa1,
0x91, 0x84, 0x3e, 0x85, 0xe1, 0xa2, 0x22, 0x98, 0x93, 0x48, 0x34, 0xeb, 0xf5, 0xe4, 0xf3, 0xe3,
0x40, 0x31, 0x11, 0xd4, 0x4c, 0x04, 0x37, 0x35, 0x13, 0x21, 0xa8, 0x70, 0x61, 0x18, 0x5f, 0xc3,
0x61, 0x0b, 0x1b, 0xb9, 0x60, 0xdf, 0x92, 0xb5, 0x26, 0x46, 0x1c, 0xd1, 0x7b, 0xd0, 0x7d, 0x86,
0xb3, 0x15, 0xf1, 0x2c, 0x89, 0x7d, 0xbc, 0x85, 0x3d, 0xa3, 0xeb, 0x50, 0x85, 0x7c, 0x62, 0x7d,
0xdc, 0xb9, 0x74, 0xf6, 0x2c, 0xd7, 0xf6, 0x7f, 0xb7, 0x60, 0x64, 0x76, 0x8e, 0xbe, 0x81, 0x61,
0x5c, 0xac, 0xe6, 0x19, 0x89, 0x70, 0x95, 0x30, 0xaf, 0x23, 0x9b, 0x7d, 0xf7, 0x5f, 0x78, 0x0a,
0xbe, 0x92, 0xa1, 0xb3, 0x2a, 0xa9, 0x5b, 0x8e, 0x1b, 0x83, 0x40, 0x62, 0xbc, 0x4a, 0x69, 0xa2,
0x90, 0xac, 0x97, 0x23, 0x5d, 0xcb, 0x50, 0x03, 0x89, 0x35, 0x06, 0x84, 0xc0, 0xe1, 0x38, 0x61,
0x9e, 0x3d, 0xb1, 0x4f, 0x07, 0xa1, 0x3c, 0x8f, 0x3f, 0x87, 0xc3, 0xd6, 0xe3, 0x3b, 0x38, 0x39,
0x36, 0x39, 0xe9, 0x18, 0xdd, 0x8b, 0xf4, 0xd6, 0x8b, 0xaf, 0x4a, 0x1f, 0x18, 0xe9, 0xfe, 0x9f,
0x1d, 0x80, 0x8d, 0xd4, 0xd0, 0x1b, 0x00, 0x8b, 0x82, 0x52, 0xb2, 0xe0, 0x69, 0x41, 0x35, 0x82,
0x61, 0x41, 0xe7, 0x2f, 0x08, 0xc8, 0x91, 0x4c, 0x9c, 0xec, 0x54, 0xed, 0xcb, 0x44, 0xf4, 0x3f,
0xea, 0xe0, 0xd2, 0xd9, 0xb3, 0x5d, 0xc7, 0xff, 0x09, 0x8e, 0x14, 0xa9, 0x21, 0xa6, 0x09, 0xb9,
0x48, 0x33, 0x4e, 0x2a, 0xf4, 0x08, 0x60, 0xa3, 0x08, 0xfd, 0xd2, 0xa0, 0x99, 0xb3, 0xa8, 0x20,
0xc7, 0xcf, 0x35, 0xc3, 0xe2, 0x28, 0x2d, 0x29, 0x95, 0x1f, 0xa7, 0xb0, 0xa4, 0xd4, 0x7f, 0x02,
0x48, 0xb1, 0x7d, 0xfe, 0xcb, 0x0a, 0x67, 0x6c, 0x03, 0xbc, 0x11, 0x48, 0x0d, 0xdc, 0x8c, 0x7d,
0x37, 0xfb, 0xfe, 0x3b, 0xe0, 0xde, 0xe0, 0xe4, 0xaa, 0x22, 0x8c, 0x50, 0xae, 0x81, 0x5c, 0xb0,
0x39, 0xae, 0x11, 0xc4, 0xd1, 0xff, 0xd5, 0x06, 0xe7, 0xaa, 0x28, 0x32, 0x21, 0x1d, 0x8a, 0x73,
0xa2, 0x7d, 0xf2, 0x8c, 0xbe, 0x87, 0x63, 0xdd, 0x50, 0x25, 0xda, 0x8c, 0x96, 0x12, 0xa5, 0x56,
0xe8, 0xeb, 0xc6, 0x5c, 0xb6, 0xc8, 0x08, 0x51, 0xdc, 0x36, 0x31, 0xf4, 0x23, 0xdc, 0xd7, 0x7d,
0x10, 0xd9, 0x5e, 0x03, 0xa8, 0x06, 0xfd, 0xc8, 0x94, 0xfc, 0x16, 0x0b, 0xe1, 0x3d, 0xb6, 0x65,
0x63, 0xe8, 0x5b, 0xb8, 0xc7, 0x71, 0x12, 0x95, 0xaa, 0xcd, 0x06, 0x50, 0xad, 0x9e, 0xd7, 0xcc,
0xd5, 0xd3, 0xe2, 0x22, 0x3c, 0xe2, 0x2d, 0x8b, 0x58, 0x5f, 0x07, 0x6a, 0x99, 0xc4, 0xd1, 0x9c,
0x2c, 0x8b, 0xea, 0xbf, 0xac, 0x9f, 0x7d, 0x9d, 0xf1, 0x58, 0x26, 0xa0, 0x2f, 0xa0, 0x36, 0x44,
0x78, 0xc9, 0x49, 0xe5, 0xf5, 0x5f, 0x89, 0x30, 0xd2, 0x09, 0x33, 0x11, 0xaf, 0xf5, 0xf5, 0x77,
0x07, 0x46, 0xdf, 0x89, 0xba, 0xaf, 0xaa, 0x62, 0x99, 0x66, 0x64, 0xe7, 0x78, 0x4e, 0xa0, 0x5b,
0x16, 0x45, 0xa6, 0x3e, 0xf7, 0xe1, 0xd9, 0xa1, 0xd1, 0xad, 0x18, 0x69, 0xa8, 0xbc, 0xe8, 0xeb,
0x1d, 0x4b, 0xd9, 0xdc, 0x2e, 0xe6, 0x3b, 0x77, 0xf7, 0x55, 0x39, 0x6e, 0xd7, 0xff, 0xc3, 0x82,
0xae, 0xac, 0x06, 0x3d, 0x84, 0x3d, 0x59, 0x5c, 0xd4, 0xfc, 0xd3, 0xfa, 0xf2, 0xfe, 0x24, 0x46,
0x6f, 0xc3, 0xbe, 0x72, 0x95, 0xaa, 0x64, 0xad, 0xfa, 0x51, 0x6e, 0xd2, 0x75, 0x02, 0x07, 0x2a,
0x68, 0xb9, 0xa2, 0x6a, 0xd7, 0xd8, 0x32, 0x4a, 0xa5, 0x5e, 0x68, 0x23, 0x7a, 0x1f, 0xfa, 0x5c,
0xfe, 0x92, 0x6a, 0x09, 0x1e, 0x6d, 0xfd, 0xac, 0xc2, 0x3a, 0x02, 0x7d, 0xf9, 0x02, 0x8f, 0x7d,
0x19, 0x3f, 0x69, 0xf3, 0x78, 0x17, 0x04, 0x76, 0xdd, 0xde, 0xa5, 0xb3, 0xd7, 0x73, 0xfb, 0x8f,
0x83, 0x9f, 0x27, 0xa2, 0x9e, 0x0f, 0x54, 0x41, 0x31, 0x79, 0x36, 0xdd, 0x5c, 0xa7, 0xe5, 0x6d,
0x32, 0x2d, 0xe7, 0xbf, 0x59, 0x83, 0x1f, 0x4a, 0x42, 0x65, 0xb1, 0xf3, 0x9e, 0x04, 0xfd, 0xf0,
0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x53, 0xe9, 0x37, 0xbc, 0x08, 0x00, 0x00,
// 747 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x55, 0xdd, 0x6e, 0xd3, 0x30,
0x14, 0x56, 0x7e, 0xfa, 0x77, 0xda, 0x6d, 0x99, 0xb7, 0x69, 0x5d, 0x61, 0xa8, 0x04, 0x26, 0x2a,
0x10, 0xa9, 0x34, 0x84, 0x84, 0x10, 0x48, 0x14, 0xd1, 0xc1, 0x86, 0x80, 0x91, 0x4d, 0x5c, 0x70,
0x53, 0xb9, 0x8d, 0x9b, 0x45, 0x4b, 0x9d, 0x10, 0xbb, 0xd3, 0xfa, 0x16, 0x3c, 0x07, 0xdc, 0x72,
0xcd, 0x53, 0xf0, 0x26, 0xbc, 0x00, 0x8a, 0x9d, 0xa6, 0x5e, 0x5b, 0xe0, 0x72, 0x77, 0xce, 0xf9,
0xf9, 0xce, 0x77, 0xbe, 0x73, 0xec, 0x00, 0xc2, 0x71, 0xd0, 0x1e, 0x11, 0xc6, 0xb0, 0x4f, 0x98,
0x13, 0x27, 0x11, 0x8f, 0x50, 0x25, 0x8a, 0x09, 0x1d, 0x61, 0x3e, 0x38, 0x6b, 0x6c, 0xfb, 0x51,
0xe4, 0x87, 0xa4, 0x9d, 0xc4, 0x83, 0x36, 0xe3, 0x98, 0x8f, 0xb3, 0x98, 0xc6, 0x4e, 0xe6, 0x10,
0x5f, 0xfd, 0xf1, 0xb0, 0x8d, 0xe9, 0x44, 0xba, 0xec, 0xef, 0x3a, 0x14, 0x4f, 0x83, 0xc1, 0x39,
0xe1, 0x68, 0x15, 0xf4, 0xc0, 0xab, 0x6b, 0x4d, 0xad, 0x55, 0x71, 0xf5, 0xc0, 0x43, 0x8f, 0x01,
0x30, 0x63, 0x81, 0x4f, 0x47, 0x84, 0xf2, 0xba, 0xd1, 0xd4, 0x5a, 0xd5, 0xfd, 0x2d, 0x27, 0x2f,
0xe7, 0x74, 0x72, 0xa7, 0xab, 0x04, 0xa2, 0x67, 0xb0, 0xc2, 0x08, 0x4e, 0x06, 0x67, 0xbd, 0x61,
0x40, 0x42, 0x8f, 0xd5, 0x4d, 0x91, 0xb9, 0xad, 0x64, 0x9e, 0x08, 0xff, 0x81, 0x70, 0xbb, 0x35,
0xa6, 0x7c, 0xa1, 0x0e, 0x00, 0xb9, 0xe4, 0x84, 0xb2, 0x20, 0xa2, 0xac, 0x5e, 0x68, 0x1a, 0xad,
0xea, 0xfe, 0x6d, 0x25, 0x55, 0x72, 0x75, 0xba, 0x79, 0x4c, 0x97, 0xf2, 0x64, 0xe2, 0x2a, 0x49,
0x8d, 0x13, 0x58, 0x9b, 0x73, 0x23, 0x0b, 0x8c, 0x73, 0x32, 0xc9, 0x7a, 0x4b, 0x8f, 0xe8, 0x3e,
0x14, 0x2e, 0x70, 0x38, 0x26, 0x75, 0x5d, 0xb0, 0xdb, 0x74, 0xa4, 0x44, 0xce, 0x54, 0x22, 0xa7,
0x43, 0x27, 0xae, 0x0c, 0x79, 0xaa, 0x3f, 0xd1, 0x8e, 0xcc, 0xb2, 0x6e, 0x19, 0xf6, 0x0f, 0x1d,
0x6a, 0x2a, 0x79, 0xf4, 0x06, 0xaa, 0x5e, 0x34, 0xee, 0x87, 0xa4, 0x87, 0x13, 0x9f, 0xd5, 0x35,
0xc1, 0xf7, 0xde, 0x5f, 0x5a, 0x75, 0x5e, 0x89, 0xd0, 0x4e, 0xe2, 0x4f, 0x59, 0x7b, 0xb9, 0x21,
0x45, 0x62, 0x3c, 0x09, 0xa8, 0x2f, 0x91, 0xf4, 0x7f, 0x23, 0x9d, 0x88, 0x50, 0x05, 0x89, 0xe5,
0x06, 0x84, 0xc0, 0xe4, 0xd8, 0x67, 0x75, 0xa3, 0x69, 0xb4, 0x2a, 0xae, 0x38, 0x37, 0x9e, 0xc3,
0xda, 0x5c, 0xf1, 0x25, 0x9a, 0x6c, 0xaa, 0x9a, 0x68, 0x4a, 0xf7, 0x69, 0xfa, 0x5c, 0xc5, 0xff,
0xa5, 0x57, 0x94, 0x74, 0xfb, 0x97, 0x06, 0x30, 0xdb, 0x16, 0x74, 0x0b, 0x60, 0x10, 0x51, 0x4a,
0x06, 0x3c, 0x88, 0x68, 0x86, 0xa0, 0x58, 0x50, 0xf7, 0xca, 0x0e, 0x98, 0x42, 0x89, 0xbd, 0xa5,
0x8b, 0x77, 0x4d, 0x7b, 0x70, 0x64, 0x96, 0x0d, 0xcb, 0xb4, 0x3f, 0xc1, 0xba, 0x14, 0xd5, 0xc5,
0xd4, 0x27, 0x07, 0x41, 0xc8, 0x49, 0x82, 0x76, 0x01, 0x66, 0x1b, 0x91, 0x55, 0xaa, 0xe4, 0x73,
0x4e, 0x19, 0x8c, 0xf0, 0x65, 0xa6, 0x70, 0x7a, 0x14, 0x96, 0x80, 0x8a, 0xfb, 0x95, 0x5a, 0x02,
0x6a, 0x1f, 0x02, 0x92, 0x6a, 0x77, 0xbf, 0x8c, 0x71, 0xc8, 0x66, 0xc0, 0xb3, 0x05, 0x99, 0x02,
0xe7, 0x63, 0x5f, 0xae, 0xbe, 0x7d, 0x17, 0xac, 0x53, 0xec, 0x1f, 0x27, 0x84, 0x11, 0xca, 0x33,
0x20, 0x0b, 0x0c, 0x8e, 0xa7, 0x08, 0xe9, 0xd1, 0xfe, 0xaa, 0x83, 0x79, 0x1c, 0x45, 0x61, 0xba,
0x3a, 0x14, 0x8f, 0x48, 0xe6, 0x13, 0x67, 0xf4, 0x1e, 0x36, 0xb3, 0x86, 0x92, 0xb4, 0xcd, 0xde,
0x50, 0xa0, 0x4c, 0x37, 0xf4, 0xa6, 0x32, 0x97, 0x05, 0x31, 0x5c, 0xe4, 0xcd, 0x9b, 0x18, 0xfa,
0x08, 0x5b, 0x59, 0x1f, 0x44, 0xb4, 0x97, 0x03, 0xca, 0x41, 0xef, 0xaa, 0x2b, 0xbf, 0xa0, 0x82,
0xbb, 0xc1, 0x16, 0x6c, 0x0c, 0xbd, 0x85, 0x0d, 0x8e, 0xfd, 0x5e, 0x2c, 0xdb, 0xcc, 0x01, 0xe5,
0xeb, 0x71, 0x43, 0x7d, 0x3d, 0xe6, 0xb4, 0x70, 0xd7, 0xf9, 0x9c, 0x85, 0x65, 0xb3, 0xfd, 0xad,
0x41, 0xed, 0x5d, 0x9a, 0x73, 0x9c, 0x44, 0xc3, 0x20, 0x24, 0x4b, 0xa5, 0xd9, 0x83, 0x42, 0x1c,
0x45, 0xa1, 0xbc, 0x6a, 0xd5, 0xfd, 0x35, 0xa5, 0x52, 0x2a, 0xa7, 0x2b, 0xbd, 0xe8, 0xf5, 0x92,
0x37, 0x4d, 0xbd, 0xd9, 0x6a, 0x9d, 0xeb, 0xdb, 0x68, 0xd3, 0x2a, 0xd8, 0x3f, 0x75, 0x28, 0x08,
0x36, 0x68, 0x07, 0xca, 0x82, 0x5c, 0x2f, 0xff, 0x25, 0x94, 0xc4, 0xf7, 0xa1, 0x87, 0xee, 0xc0,
0x8a, 0x74, 0xc5, 0x92, 0x72, 0xb6, 0x71, 0xb5, 0x91, 0x2a, 0xd7, 0x1e, 0xac, 0xca, 0xa0, 0xe1,
0x98, 0xca, 0x7b, 0x6e, 0x88, 0x28, 0x99, 0x7a, 0x90, 0x19, 0xd1, 0x03, 0x28, 0x71, 0xf1, 0xa2,
0x4f, 0xc7, 0xbf, 0xbe, 0xf0, 0xd6, 0xbb, 0xd3, 0x08, 0xf4, 0xe2, 0x8a, 0x8e, 0x25, 0x11, 0xdf,
0x9c, 0xd7, 0xf1, 0x3a, 0x04, 0x2c, 0x58, 0xc5, 0x23, 0xb3, 0x5c, 0xb4, 0x4a, 0x2f, 0x9d, 0xcf,
0xcd, 0x94, 0xcf, 0x43, 0x49, 0xc8, 0x23, 0x17, 0xed, 0xd9, 0x67, 0x3b, 0x3e, 0xf7, 0xdb, 0x71,
0xff, 0x9b, 0x5e, 0xf9, 0x10, 0x13, 0x2a, 0xc8, 0xf6, 0x8b, 0x02, 0xf4, 0xd1, 0x9f, 0x00, 0x00,
0x00, 0xff, 0xff, 0x57, 0x22, 0xdc, 0xde, 0xda, 0x07, 0x00, 0x00,
}

@ -27,7 +27,7 @@ var _ = math.Inf
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type QueryTicketsRequest struct {
// The Pool representing the set of Filters to be queried.
// A Pool consists of a set of Filters.
Pool *Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -67,7 +67,7 @@ func (m *QueryTicketsRequest) GetPool() *Pool {
}
type QueryTicketsResponse struct {
// Tickets that meet all the filtering criteria requested by the pool.
// Tickets that satisfy all the filtering criteria.
Tickets []*Ticket `protobuf:"bytes,1,rep,name=tickets,proto3" json:"tickets,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -106,134 +106,48 @@ func (m *QueryTicketsResponse) GetTickets() []*Ticket {
return nil
}
type QueryTicketIdsRequest struct {
// The Pool representing the set of Filters to be queried.
Pool *Pool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *QueryTicketIdsRequest) Reset() { *m = QueryTicketIdsRequest{} }
func (m *QueryTicketIdsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryTicketIdsRequest) ProtoMessage() {}
func (*QueryTicketIdsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_5ec7651f31a90698, []int{2}
}
func (m *QueryTicketIdsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryTicketIdsRequest.Unmarshal(m, b)
}
func (m *QueryTicketIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_QueryTicketIdsRequest.Marshal(b, m, deterministic)
}
func (m *QueryTicketIdsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryTicketIdsRequest.Merge(m, src)
}
func (m *QueryTicketIdsRequest) XXX_Size() int {
return xxx_messageInfo_QueryTicketIdsRequest.Size(m)
}
func (m *QueryTicketIdsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_QueryTicketIdsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_QueryTicketIdsRequest proto.InternalMessageInfo
func (m *QueryTicketIdsRequest) GetPool() *Pool {
if m != nil {
return m.Pool
}
return nil
}
type QueryTicketIdsResponse struct {
// TicketIDs that meet all the filtering criteria requested by the pool.
Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *QueryTicketIdsResponse) Reset() { *m = QueryTicketIdsResponse{} }
func (m *QueryTicketIdsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryTicketIdsResponse) ProtoMessage() {}
func (*QueryTicketIdsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_5ec7651f31a90698, []int{3}
}
func (m *QueryTicketIdsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryTicketIdsResponse.Unmarshal(m, b)
}
func (m *QueryTicketIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_QueryTicketIdsResponse.Marshal(b, m, deterministic)
}
func (m *QueryTicketIdsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryTicketIdsResponse.Merge(m, src)
}
func (m *QueryTicketIdsResponse) XXX_Size() int {
return xxx_messageInfo_QueryTicketIdsResponse.Size(m)
}
func (m *QueryTicketIdsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_QueryTicketIdsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_QueryTicketIdsResponse proto.InternalMessageInfo
func (m *QueryTicketIdsResponse) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
func init() {
proto.RegisterType((*QueryTicketsRequest)(nil), "openmatch.QueryTicketsRequest")
proto.RegisterType((*QueryTicketsResponse)(nil), "openmatch.QueryTicketsResponse")
proto.RegisterType((*QueryTicketIdsRequest)(nil), "openmatch.QueryTicketIdsRequest")
proto.RegisterType((*QueryTicketIdsResponse)(nil), "openmatch.QueryTicketIdsResponse")
}
func init() { proto.RegisterFile("api/query.proto", fileDescriptor_5ec7651f31a90698) }
var fileDescriptor_5ec7651f31a90698 = []byte{
// 577 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xdf, 0x4e, 0x13, 0x4f,
0x14, 0xc7, 0xb3, 0x5b, 0x02, 0x61, 0xf8, 0xe5, 0x07, 0x8e, 0x42, 0x48, 0x63, 0x70, 0x58, 0x62,
0x02, 0xc5, 0xee, 0x94, 0xca, 0x55, 0xd5, 0x04, 0x04, 0x2e, 0x48, 0x8a, 0x7f, 0x8a, 0xf1, 0xc2,
0xbb, 0xe9, 0xec, 0x71, 0x3b, 0xd2, 0xce, 0x19, 0x66, 0x66, 0x41, 0x12, 0xaf, 0x8c, 0x4f, 0x20,
0x37, 0xc6, 0x47, 0xf0, 0x25, 0x7c, 0x08, 0x5f, 0xc1, 0xf8, 0x1c, 0x66, 0x77, 0x8b, 0x94, 0x7f,
0x26, 0x5e, 0xb5, 0x3b, 0xdf, 0xef, 0x39, 0xdf, 0xcf, 0x9c, 0x99, 0x21, 0xd3, 0xc2, 0x28, 0x7e,
0x98, 0x81, 0x3d, 0x89, 0x8d, 0x45, 0x8f, 0x74, 0x12, 0x0d, 0xe8, 0x81, 0xf0, 0xb2, 0x57, 0xa5,
0xb9, 0x36, 0x00, 0xe7, 0x44, 0x0a, 0xae, 0x94, 0xab, 0x77, 0x53, 0xc4, 0xb4, 0x0f, 0x3c, 0x97,
0x84, 0xd6, 0xe8, 0x85, 0x57, 0xa8, 0xcf, 0xd4, 0x07, 0xc5, 0x8f, 0xac, 0xa7, 0xa0, 0xeb, 0xee,
0x58, 0xa4, 0x29, 0x58, 0x8e, 0xa6, 0x70, 0x5c, 0x75, 0x47, 0x2d, 0x72, 0xfb, 0x65, 0x9e, 0xfc,
0x4a, 0xc9, 0x03, 0xf0, 0xae, 0x03, 0x87, 0x19, 0x38, 0x4f, 0x97, 0xc8, 0x98, 0x41, 0xec, 0xcf,
0x07, 0x2c, 0x58, 0x9e, 0x6a, 0x4e, 0xc7, 0x7f, 0x80, 0xe2, 0x17, 0x88, 0xfd, 0x4e, 0x21, 0x46,
0x5b, 0xe4, 0xce, 0xc5, 0x5a, 0x67, 0x50, 0x3b, 0xa0, 0xab, 0x64, 0xc2, 0x97, 0x4b, 0xf3, 0x01,
0xab, 0x2c, 0x4f, 0x35, 0x6f, 0x8d, 0xd4, 0x97, 0xe6, 0xce, 0x99, 0x23, 0x7a, 0x4c, 0x66, 0x47,
0x9a, 0xec, 0x26, 0xff, 0x86, 0x50, 0x23, 0x73, 0x97, 0xab, 0x87, 0x10, 0x33, 0xa4, 0xa2, 0x92,
0x12, 0x60, 0xb2, 0x93, 0xff, 0x6d, 0x9e, 0x86, 0xe4, 0xbf, 0xc2, 0xbc, 0x0f, 0xf6, 0x48, 0x49,
0xa0, 0x1f, 0x86, 0xdf, 0x43, 0x7e, 0xba, 0x30, 0x92, 0x71, 0xcd, 0x50, 0xaa, 0xf7, 0x6e, 0xd4,
0xcb, 0xcc, 0x68, 0xe5, 0xe3, 0x8f, 0x9f, 0xa7, 0xe1, 0x52, 0xb4, 0xc0, 0x8f, 0xd6, 0xca, 0x03,
0x75, 0x65, 0x14, 0x1f, 0xee, 0xb6, 0x55, 0x2c, 0xb6, 0x82, 0x5a, 0x23, 0xa0, 0x9f, 0x02, 0xf2,
0xff, 0x45, 0x76, 0xca, 0xae, 0x0f, 0x38, 0x1f, 0x4a, 0x75, 0xf1, 0x2f, 0x8e, 0x21, 0xc4, 0x6a,
0x01, 0x71, 0x3f, 0x62, 0x37, 0x40, 0xa8, 0x64, 0x14, 0xe3, 0xe9, 0x97, 0xca, 0xe7, 0xcd, 0x5f,
0x21, 0xfd, 0x1e, 0x90, 0xd9, 0xbd, 0x3d, 0xd6, 0xc6, 0x54, 0x49, 0xb6, 0xbc, 0x2d, 0xbc, 0x60,
0x6d, 0x71, 0x02, 0x76, 0x25, 0xda, 0x25, 0xe4, 0xb9, 0x01, 0xcd, 0xf6, 0xf2, 0x50, 0x3a, 0xd7,
0xf3, 0xde, 0xb8, 0x16, 0xe7, 0x39, 0x47, 0xbd, 0x04, 0x49, 0xe0, 0xa8, 0xba, 0x74, 0xfe, 0x5d,
0x4f, 0x94, 0x93, 0x99, 0x73, 0x1b, 0xe5, 0x35, 0x4d, 0x2d, 0x66, 0xc6, 0xc5, 0x12, 0x07, 0xb5,
0xd7, 0x84, 0x6e, 0x1a, 0x21, 0x7b, 0xc0, 0x9a, 0x71, 0x83, 0xb5, 0x95, 0x84, 0xfc, 0xa4, 0x36,
0xce, 0x5a, 0xa6, 0xca, 0xf7, 0xb2, 0x6e, 0xee, 0xe4, 0x65, 0xe9, 0x5b, 0xb4, 0xa9, 0x18, 0x80,
0x1b, 0x09, 0xe3, 0xdd, 0x3e, 0x76, 0xf9, 0x40, 0x38, 0x0f, 0x96, 0xb7, 0x77, 0xb7, 0x76, 0x9e,
0xed, 0xef, 0x34, 0x2b, 0x6b, 0x71, 0xa3, 0x16, 0x06, 0x61, 0x73, 0x46, 0x18, 0xd3, 0x57, 0xb2,
0xb8, 0xe1, 0xfc, 0x9d, 0x43, 0xdd, 0xba, 0xb2, 0xd2, 0x79, 0x44, 0x2a, 0xeb, 0x8d, 0x75, 0xba,
0x4e, 0x6a, 0x1d, 0xf0, 0x99, 0xd5, 0x90, 0xb0, 0xe3, 0x1e, 0x68, 0xe6, 0x7b, 0xc0, 0x2c, 0x38,
0xcc, 0xac, 0x04, 0x96, 0x20, 0x38, 0xa6, 0xd1, 0x33, 0x78, 0xaf, 0x9c, 0x8f, 0xe9, 0x38, 0x19,
0xfb, 0x1a, 0x06, 0x13, 0xf6, 0x09, 0x99, 0x3f, 0x1f, 0x06, 0xdb, 0x46, 0x99, 0x0d, 0x40, 0x97,
0x2f, 0x8a, 0x2e, 0x5e, 0x3f, 0x1a, 0xee, 0x94, 0x07, 0x9e, 0xa0, 0x74, 0xfc, 0x0d, 0xbb, 0x24,
0x8d, 0xec, 0xcb, 0x1c, 0xa4, 0xdc, 0x74, 0xbf, 0x85, 0x93, 0x79, 0xff, 0xa2, 0x7d, 0x77, 0xbc,
0x78, 0xa2, 0x0f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x69, 0xc6, 0x96, 0x66, 0x20, 0x04, 0x00,
0x00,
// 516 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xcd, 0x6e, 0x13, 0x3d,
0x14, 0xd5, 0x4c, 0xaa, 0x56, 0x75, 0x3f, 0xa9, 0x1f, 0xe6, 0x47, 0x55, 0x84, 0x8a, 0x49, 0x37,
0x69, 0x20, 0xe3, 0x74, 0xc8, 0x2a, 0x08, 0xa9, 0xa5, 0xed, 0xa2, 0x52, 0xc2, 0x4f, 0x8a, 0x58,
0xb0, 0x73, 0x9c, 0xcb, 0x8c, 0x69, 0xc6, 0xd7, 0xb5, 0x3d, 0x29, 0x95, 0x58, 0xb1, 0x66, 0x05,
0x1b, 0xc4, 0x23, 0xf0, 0x12, 0x3c, 0x04, 0xaf, 0x80, 0x78, 0x0e, 0x34, 0x33, 0x09, 0x0d, 0xb4,
0xac, 0x46, 0xbe, 0xe7, 0xdc, 0x73, 0xce, 0x1c, 0x9b, 0xac, 0x0b, 0xa3, 0xf8, 0x69, 0x0e, 0xf6,
0x3c, 0x32, 0x16, 0x3d, 0xd2, 0x55, 0x34, 0xa0, 0x33, 0xe1, 0x65, 0x5a, 0xa7, 0x05, 0x96, 0x81,
0x73, 0x22, 0x01, 0x57, 0xc1, 0xf5, 0xdb, 0x09, 0x62, 0x32, 0x01, 0x5e, 0x40, 0x42, 0x6b, 0xf4,
0xc2, 0x2b, 0xd4, 0x73, 0xf4, 0x7e, 0xf9, 0x91, 0xed, 0x04, 0x74, 0xdb, 0x9d, 0x89, 0x24, 0x01,
0xcb, 0xd1, 0x94, 0x8c, 0xcb, 0xec, 0x46, 0x8f, 0x5c, 0x7f, 0x5e, 0x38, 0xbf, 0x50, 0xf2, 0x04,
0xbc, 0x1b, 0xc2, 0x69, 0x0e, 0xce, 0xd3, 0x2d, 0xb2, 0x64, 0x10, 0x27, 0x1b, 0x01, 0x0b, 0x9a,
0x6b, 0xf1, 0x7a, 0xf4, 0x3b, 0x50, 0xf4, 0x0c, 0x71, 0x32, 0x2c, 0xc1, 0xc6, 0x3e, 0xb9, 0xf1,
0xe7, 0xae, 0x33, 0xa8, 0x1d, 0xd0, 0x7b, 0x64, 0xc5, 0x57, 0xa3, 0x8d, 0x80, 0xd5, 0x9a, 0x6b,
0xf1, 0xb5, 0x85, 0xfd, 0x8a, 0x3c, 0x9c, 0x33, 0xe2, 0x0f, 0x01, 0xf9, 0xaf, 0x54, 0x39, 0x06,
0x3b, 0x55, 0x12, 0xe8, 0xbb, 0xd9, 0x79, 0xa6, 0x4a, 0x37, 0x17, 0x96, 0xaf, 0x88, 0x5a, 0xbf,
0xf3, 0x4f, 0xbc, 0x8a, 0xd3, 0xd8, 0x7e, 0xff, 0xfd, 0xc7, 0xa7, 0x70, 0xab, 0xb1, 0xc9, 0xa7,
0x3b, 0x55, 0xcd, 0xae, 0xb2, 0xe2, 0xb3, 0x0c, 0xbd, 0x72, 0xd8, 0x0b, 0x5a, 0x9d, 0xe0, 0xf1,
0xe7, 0xda, 0xc7, 0xbd, 0x9f, 0x21, 0xfd, 0x16, 0x90, 0x9b, 0x83, 0x01, 0xeb, 0x63, 0xa2, 0x24,
0x6b, 0x1e, 0x08, 0x2f, 0x58, 0x5f, 0x9c, 0x83, 0xdd, 0x6e, 0x1c, 0x11, 0xf2, 0xd4, 0x80, 0x66,
0x83, 0xc2, 0x90, 0xde, 0x4a, 0xbd, 0x37, 0xae, 0xc7, 0x79, 0x91, 0xa1, 0x5d, 0x85, 0x18, 0xc3,
0xb4, 0xbe, 0x75, 0x71, 0x6e, 0x8f, 0x95, 0x93, 0xb9, 0x73, 0xbb, 0xd5, 0xad, 0x25, 0x16, 0x73,
0xe3, 0x22, 0x89, 0x59, 0xeb, 0x25, 0xa1, 0x7b, 0x46, 0xc8, 0x14, 0x58, 0x1c, 0x75, 0x58, 0x5f,
0x49, 0x28, 0xda, 0xdb, 0x9d, 0x4b, 0x26, 0xca, 0xa7, 0xf9, 0xa8, 0x60, 0xf2, 0x6a, 0xf5, 0x35,
0xda, 0x44, 0x64, 0xe0, 0x16, 0xcc, 0xf8, 0x68, 0x82, 0x23, 0x9e, 0x09, 0xe7, 0xc1, 0xf2, 0xfe,
0xd1, 0xfe, 0xe1, 0x93, 0xe3, 0xc3, 0xb8, 0xb6, 0x13, 0x75, 0x5a, 0x61, 0x10, 0xc6, 0xff, 0x0b,
0x63, 0x26, 0x4a, 0x96, 0x17, 0xce, 0xdf, 0x38, 0xd4, 0xbd, 0x4b, 0x93, 0xe1, 0x43, 0x52, 0xeb,
0x76, 0xba, 0xb4, 0x4b, 0x5a, 0x43, 0xf0, 0xb9, 0xd5, 0x30, 0x66, 0x67, 0x29, 0x68, 0xe6, 0x53,
0x60, 0x16, 0x1c, 0xe6, 0x56, 0x02, 0x1b, 0x23, 0x38, 0xa6, 0xd1, 0x33, 0x78, 0xab, 0x9c, 0x8f,
0xe8, 0x32, 0x59, 0xfa, 0x12, 0x06, 0x2b, 0xf6, 0x11, 0xd9, 0xb8, 0x28, 0x83, 0x1d, 0xa0, 0xcc,
0x33, 0xd0, 0xd5, 0x03, 0xa3, 0x77, 0xaf, 0xae, 0x86, 0x3b, 0xe5, 0x81, 0x8f, 0x51, 0x3a, 0xfe,
0x8a, 0xfd, 0x05, 0x2d, 0xfc, 0x97, 0x39, 0x49, 0xb8, 0x19, 0x7d, 0x0d, 0x57, 0x0b, 0xfd, 0x52,
0x7e, 0xb4, 0x5c, 0xbe, 0xd8, 0x07, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x04, 0x46, 0x55,
0x2f, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -250,14 +164,9 @@ const _ = grpc.SupportPackageIsVersion4
type QueryServiceClient interface {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `storage.pool.size` and streams back responses.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.
// QueryTickets pages the Tickets by `storage.pool.size` and streams back response.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000
QueryTickets(ctx context.Context, in *QueryTicketsRequest, opts ...grpc.CallOption) (QueryService_QueryTicketsClient, error)
// QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
// - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
// QueryTicketIds pages the TicketIDs by `storage.pool.size` and streams back responses.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.
QueryTicketIds(ctx context.Context, in *QueryTicketIdsRequest, opts ...grpc.CallOption) (QueryService_QueryTicketIdsClient, error)
}
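Since QueryTickets pages results across a server stream, callers typically accumulate tickets until EOF. A sketch assuming a constructed pb.QueryServiceClient and a Pool built elsewhere; imports of context and io are implied.

// queryAll drains the QueryTickets stream and returns every ticket that
// matched the pool's filters, across however many pages the server sends.
func queryAll(ctx context.Context, qs pb.QueryServiceClient, pool *pb.Pool) ([]*pb.Ticket, error) {
	stream, err := qs.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: pool})
	if err != nil {
		return nil, err
	}
	var tickets []*pb.Ticket
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return tickets, nil
		}
		if err != nil {
			return nil, err
		}
		tickets = append(tickets, resp.GetTickets()...)
	}
}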
type queryServiceClient struct {
@ -300,50 +209,13 @@ func (x *queryServiceQueryTicketsClient) Recv() (*QueryTicketsResponse, error) {
return m, nil
}
func (c *queryServiceClient) QueryTicketIds(ctx context.Context, in *QueryTicketIdsRequest, opts ...grpc.CallOption) (QueryService_QueryTicketIdsClient, error) {
stream, err := c.cc.NewStream(ctx, &_QueryService_serviceDesc.Streams[1], "/openmatch.QueryService/QueryTicketIds", opts...)
if err != nil {
return nil, err
}
x := &queryServiceQueryTicketIdsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type QueryService_QueryTicketIdsClient interface {
Recv() (*QueryTicketIdsResponse, error)
grpc.ClientStream
}
type queryServiceQueryTicketIdsClient struct {
grpc.ClientStream
}
func (x *queryServiceQueryTicketIdsClient) Recv() (*QueryTicketIdsResponse, error) {
m := new(QueryTicketIdsResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// QueryServiceServer is the server API for QueryService service.
type QueryServiceServer interface {
// QueryTickets gets a list of Tickets that match all Filters of the input Pool.
// - If the Pool contains no Filters, QueryTickets will return all Tickets in the state storage.
// QueryTickets pages the Tickets by `storage.pool.size` and streams back responses.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.
// QueryTickets pages the Tickets by `storage.pool.size` and streams back response.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000
QueryTickets(*QueryTicketsRequest, QueryService_QueryTicketsServer) error
// QueryTicketIds gets the list of TicketIDs that meet all the filtering criteria requested by the pool.
// - If the Pool contains no Filters, QueryTicketIds will return all TicketIDs in the state storage.
// QueryTicketIds pages the TicketIDs by `storage.pool.size` and streams back responses.
// - storage.pool.size defaults to 1000 if not set, and has a minimum of 10 and maximum of 10000.
QueryTicketIds(*QueryTicketIdsRequest, QueryService_QueryTicketIdsServer) error
}
// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations.
@ -353,9 +225,6 @@ type UnimplementedQueryServiceServer struct {
func (*UnimplementedQueryServiceServer) QueryTickets(req *QueryTicketsRequest, srv QueryService_QueryTicketsServer) error {
return status.Errorf(codes.Unimplemented, "method QueryTickets not implemented")
}
func (*UnimplementedQueryServiceServer) QueryTicketIds(req *QueryTicketIdsRequest, srv QueryService_QueryTicketIdsServer) error {
return status.Errorf(codes.Unimplemented, "method QueryTicketIds not implemented")
}
func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) {
s.RegisterService(&_QueryService_serviceDesc, srv)
@ -382,27 +251,6 @@ func (x *queryServiceQueryTicketsServer) Send(m *QueryTicketsResponse) error {
return x.ServerStream.SendMsg(m)
}
func _QueryService_QueryTicketIds_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(QueryTicketIdsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(QueryServiceServer).QueryTicketIds(m, &queryServiceQueryTicketIdsServer{stream})
}
type QueryService_QueryTicketIdsServer interface {
Send(*QueryTicketIdsResponse) error
grpc.ServerStream
}
type queryServiceQueryTicketIdsServer struct {
grpc.ServerStream
}
func (x *queryServiceQueryTicketIdsServer) Send(m *QueryTicketIdsResponse) error {
return x.ServerStream.SendMsg(m)
}
var _QueryService_serviceDesc = grpc.ServiceDesc{
ServiceName: "openmatch.QueryService",
HandlerType: (*QueryServiceServer)(nil),
@ -413,11 +261,6 @@ var _QueryService_serviceDesc = grpc.ServiceDesc{
Handler: _QueryService_QueryTickets_Handler,
ServerStreams: true,
},
{
StreamName: "QueryTicketIds",
Handler: _QueryService_QueryTicketIds_Handler,
ServerStreams: true,
},
},
Metadata: "api/query.proto",
}

@ -56,31 +56,6 @@ func request_QueryService_QueryTickets_0(ctx context.Context, marshaler runtime.
}
func request_QueryService_QueryTicketIds_0(ctx context.Context, marshaler runtime.Marshaler, client QueryServiceClient, req *http.Request, pathParams map[string]string) (QueryService_QueryTicketIdsClient, runtime.ServerMetadata, error) {
var protoReq QueryTicketIdsRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
stream, err := client.QueryTicketIds(ctx, &protoReq)
if err != nil {
return nil, metadata, err
}
header, err := stream.Header()
if err != nil {
return nil, metadata, err
}
metadata.HeaderMD = header
return stream, metadata, nil
}
// RegisterQueryServiceHandlerServer registers the http handlers for service QueryService to "mux".
// UnaryRPC :call QueryServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@ -93,13 +68,6 @@ func RegisterQueryServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu
return
})
mux.Handle("POST", pattern_QueryService_QueryTicketIds_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
_, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
})
return nil
}
@ -161,37 +129,13 @@ func RegisterQueryServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
})
mux.Handle("POST", pattern_QueryService_QueryTicketIds_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_QueryService_QueryTicketIds_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_QueryService_QueryTicketIds_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_QueryService_QueryTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "queryservice", "tickets"}, "query", runtime.AssumeColonVerbOpt(true)))
pattern_QueryService_QueryTicketIds_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "queryservice", "ticketids"}, "query", runtime.AssumeColonVerbOpt(true)))
)
var (
forward_QueryService_QueryTickets_0 = runtime.ForwardResponseStream
forward_QueryService_QueryTicketIds_0 = runtime.ForwardResponseStream
)

@ -25,18 +25,19 @@ import (
statestoreTesting "open-match.dev/open-match/internal/statestore/testing"
"open-match.dev/open-match/internal/testing/e2e"
"open-match.dev/open-match/pkg/pb"
"open-match.dev/open-match/test/matchfunction/mmf"
)
func TestServiceHealth(t *testing.T) {
om := e2e.New(t)
om, closer := e2e.New(t)
defer closer()
if err := om.HealthCheck(); err != nil {
t.Errorf("cluster health checks failed, %s", err)
}
}
func TestGetClients(t *testing.T) {
om := e2e.New(t)
om, closer := e2e.New(t)
defer closer()
if c := om.MustFrontendGRPC(); c == nil {
t.Error("cannot get frontendService client")
@ -64,7 +65,8 @@ func TestGameMatchWorkFlow(t *testing.T) {
8. Call backend.FetchMatches and verify the response does not contain the tickets got deleted.
*/
om := e2e.New(t)
om, closer := e2e.New(t)
defer closer()
fe := om.MustFrontendGRPC()
be := om.MustBackendGRPC()
mmfCfg := om.MustMmfConfigGRPC()
@ -120,12 +122,12 @@ func TestGameMatchWorkFlow(t *testing.T) {
var err error
// 1. Create a few tickets with delicate designs and hand crafted search fields
for i := 0; i < len(tickets); i++ {
var ticket *pb.Ticket
ticket, err = fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: tickets[i]}, grpc.WaitForReady(true))
var ctResp *pb.CreateTicketResponse
ctResp, err = fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: tickets[i]}, grpc.WaitForReady(true))
require.Nil(t, err)
require.NotNil(t, ticket)
require.NotNil(t, ctResp)
// Assign Open Match ids back to the input tickets
*tickets[i] = *ticket
*tickets[i] = *ctResp.GetTicket()
}
fmReq := &pb.FetchMatchesRequest{
@ -154,56 +156,48 @@ func TestGameMatchWorkFlow(t *testing.T) {
}
// 2. Call backend.FetchMatches and expects two matches with the following tickets
matches := []*pb.Match{MustMakeMatch(ticket2, ticket3, ticket4), MustMakeMatch(ticket5)}
validateFetchMatchesResponse(ctx, t, matches, be, fmReq)
var wantTickets = [][]*pb.Ticket{{ticket2, ticket3, ticket4}, {ticket5}}
validateFetchMatchesResponse(ctx, t, wantTickets, be, fmReq)
// 3. Call backend.FetchMatches within storage.ignoreListTTL seconds and expects it return a match with ticket1 .
matches = []*pb.Match{MustMakeMatch(ticket1)}
validateFetchMatchesResponse(ctx, t, matches, be, fmReq)
wantTickets = [][]*pb.Ticket{{ticket1}}
validateFetchMatchesResponse(ctx, t, wantTickets, be, fmReq)
// 4. Wait for storage.ignoreListTTL seconds and call backend.FetchMatches the third time, expect the same result as step 2.
time.Sleep(statestoreTesting.IgnoreListTTL)
matches = []*pb.Match{MustMakeMatch(ticket2, ticket3, ticket4), MustMakeMatch(ticket5)}
validateFetchMatchesResponse(ctx, t, matches, be, fmReq)
wantTickets = [][]*pb.Ticket{{ticket2, ticket3, ticket4}, {ticket5}}
validateFetchMatchesResponse(ctx, t, wantTickets, be, fmReq)
// 5. Call backend.AssignTickets to assign DGSs for the tickets in FetchMatches' response
var gotAtResp *pb.AssignTicketsResponse
for _, m := range matches {
for _, tickets := range wantTickets {
tids := []string{}
for _, ticket := range m.Tickets {
for _, ticket := range tickets {
tids = append(tids, ticket.GetId())
}
req := &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: tids,
Assignment: &pb.Assignment{Connection: "agones-1"},
},
},
}
gotAtResp, err = be.AssignTickets(ctx, req, grpc.WaitForReady(true))
gotAtResp, err = be.AssignTickets(ctx, &pb.AssignTicketsRequest{TicketIds: tids, Assignment: &pb.Assignment{Connection: "agones-1"}}, grpc.WaitForReady(true))
require.Nil(t, err)
require.NotNil(t, gotAtResp)
}
// 6. Call backend.FetchMatches and verify it no longer returns the tickets that got assigned in the previous step.
time.Sleep(statestoreTesting.IgnoreListTTL)
matches = []*pb.Match{MustMakeMatch(ticket1)}
validateFetchMatchesResponse(ctx, t, matches, be, fmReq)
wantTickets = [][]*pb.Ticket{{ticket1}}
validateFetchMatchesResponse(ctx, t, wantTickets, be, fmReq)
// 7. Call frontend.DeleteTicket to delete the tickets returned in step 6.
_, err = fe.DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticket1.GetId()}, grpc.WaitForReady(true))
var gotDtResp *pb.DeleteTicketResponse
gotDtResp, err = fe.DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: ticket1.GetId()}, grpc.WaitForReady(true))
require.Nil(t, err)
require.NotNil(t, gotDtResp)
// 8. Call backend.FetchMatches and verify the response does not contain the tickets that got deleted.
time.Sleep(statestoreTesting.IgnoreListTTL)
matches = []*pb.Match{}
validateFetchMatchesResponse(ctx, t, matches, be, fmReq)
wantTickets = [][]*pb.Ticket{}
validateFetchMatchesResponse(ctx, t, wantTickets, be, fmReq)
}
func validateFetchMatchesResponse(ctx context.Context, t *testing.T, expectedMatches []*pb.Match, be pb.BackendServiceClient, fmReq *pb.FetchMatchesRequest) {
func validateFetchMatchesResponse(ctx context.Context, t *testing.T, wantTickets [][]*pb.Ticket, be pb.BackendServiceClient, fmReq *pb.FetchMatchesRequest) {
stream, err := be.FetchMatches(ctx, fmReq, grpc.WaitForReady(true))
require.Nil(t, err)
matches := make([]*pb.Match, 0)
@ -216,13 +210,8 @@ func validateFetchMatchesResponse(ctx context.Context, t *testing.T, expectedMat
matches = append(matches, resp.GetMatch())
}
require.ElementsMatch(t, expectedMatches, matches)
}
func MustMakeMatch(tickets ...*pb.Ticket) *pb.Match {
m, err := mmf.MakeMatch("test-profile", tickets...)
if err != nil {
panic(err)
require.Equal(t, len(wantTickets), len(matches))
for _, match := range matches {
require.Contains(t, wantTickets, match.GetTickets())
}
return m
}

@ -15,9 +15,8 @@
package e2e
import (
"testing"
"open-match.dev/open-match/internal/testing/e2e"
"testing"
)
func TestMain(m *testing.M) {

@ -29,7 +29,8 @@ import (
)
func TestFetchMatches(t *testing.T) {
om := e2e.New(t)
om, closer := e2e.New(t)
defer closer()
be := om.MustBackendGRPC()

@ -94,7 +94,7 @@ func TestMinimatch(t *testing.T) {
// Test profiles being tested for. Note that each profile embeds two pools - and
// the current MMF returns a match per pool in the profile - so each profile should
// output two matches that are composed of tickets belonging to that pool.
sourceProfiles := []*testProfile{
sourceProfiles := []testProfile{
{name: "", pools: []*pb.Pool{testPools[e2e.Map1BeginnerPool], testPools[e2e.Map1AdvancedPool]}},
{name: "", pools: []*pb.Pool{testPools[e2e.Map2BeginnerPool], testPools[e2e.Map2AdvancedPool]}},
}
@ -117,13 +117,15 @@ func TestMinimatch(t *testing.T) {
for _, test := range tests {
test := test
testTickets := make([]testTicket, len(sourceTickets))
testProfiles := make([]*testProfile, len(sourceProfiles))
testProfiles := make([]testProfile, len(sourceProfiles))
copy(testTickets, sourceTickets)
copy(testProfiles, sourceProfiles)
t.Run(fmt.Sprintf("TestMinimatch-%v", test.description), func(t *testing.T) {
t.Parallel()
om := e2e.New(t)
om, closer := e2e.New(t)
defer closer()
fc := test.fcGen(om)
fe := om.MustFrontendGRPC()
@ -132,19 +134,19 @@ func TestMinimatch(t *testing.T) {
// Create all the tickets and validate ticket creation succeeds. Also populate ticket ids
// to expected player pools.
for i := 0; i < len(testTickets); i++ {
for i, td := range testTickets {
resp, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
e2e.DoubleArgDefense: testTickets[i].skill,
testTickets[i].mapValue: float64(time.Now().Unix()),
e2e.DoubleArgDefense: td.skill,
td.mapValue: float64(time.Now().Unix()),
},
},
}})
assert.NotNil(t, resp)
assert.Nil(t, err)
testTickets[i].id = resp.Id
testTickets[i].id = resp.Ticket.Id
}
// poolTickets represents a map of the pool name to all the ticket ids in the pool.
@ -182,7 +184,7 @@ func TestMinimatch(t *testing.T) {
}
// Validate that all the pools have the expected tickets.
assert.ElementsMatch(t, poolTickets[pool.Name], want)
assert.Equal(t, poolTickets[pool.Name], want)
}
testFetchMatches(ctx, t, poolTickets, testProfiles, om, fc)
@ -191,7 +193,7 @@ func TestMinimatch(t *testing.T) {
})
}
func testFetchMatches(ctx context.Context, t *testing.T, poolTickets map[string][]string, testProfiles []*testProfile, om e2e.OM, fc *pb.FunctionConfig) {
func testFetchMatches(ctx context.Context, t *testing.T, poolTickets map[string][]string, testProfiles []testProfile, om e2e.OM, fc *pb.FunctionConfig) {
// Fetch Matches for each test profile.
be := om.MustBackendGRPC()
for _, profile := range testProfiles {

@ -17,203 +17,282 @@
package e2e
import (
"context"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"open-match.dev/open-match/internal/filter/testcases"
internalTesting "open-match.dev/open-match/internal/testing"
"open-match.dev/open-match/internal/testing/e2e"
e2eTesting "open-match.dev/open-match/internal/testing/e2e"
"open-match.dev/open-match/pkg/pb"
)
func TestNoPool(t *testing.T) {
om := e2e.New(t)
func TestQueryTickets(t *testing.T) {
tests := []struct {
description string
pool *pb.Pool
gotTickets []*pb.Ticket
wantCode codes.Code
wantTickets []*pb.Ticket
wantPageCount int
}{
{
description: "expects invalid argument code since pool is empty",
pool: nil,
wantCode: codes.InvalidArgument,
wantTickets: nil,
wantPageCount: 0,
},
{
description: "expects response with no tickets since the store is empty",
gotTickets: []*pb.Ticket{},
pool: &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{{
DoubleArg: "ok",
}},
},
wantCode: codes.OK,
wantTickets: nil,
wantPageCount: 0,
},
{
description: "expects response with no tickets since all tickets in the store are filtered out",
gotTickets: internalTesting.GenerateFloatRangeTickets(
internalTesting.Property{Name: e2e.DoubleArgMMR, Min: 0, Max: 10, Interval: 2},
internalTesting.Property{Name: e2e.DoubleArgLevel, Min: 0, Max: 10, Interval: 2},
),
pool: &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{{
DoubleArg: e2e.DoubleArgDefense,
}},
},
wantCode: codes.OK,
wantTickets: nil,
wantPageCount: 0,
},
{
description: "expects response with 5 tickets with e2e.FloatRangeDoubleArg1=2 and e2e.FloatRangeDoubleArg2 in range of [0,10)",
gotTickets: internalTesting.GenerateFloatRangeTickets(
internalTesting.Property{Name: e2e.DoubleArgMMR, Min: 0, Max: 10, Interval: 2},
internalTesting.Property{Name: e2e.DoubleArgLevel, Min: 0, Max: 10, Interval: 2},
),
pool: &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{{
DoubleArg: e2e.DoubleArgMMR,
Min: 1,
Max: 3,
}},
},
wantCode: codes.OK,
wantTickets: internalTesting.GenerateFloatRangeTickets(
internalTesting.Property{Name: e2e.DoubleArgMMR, Min: 2, Max: 3, Interval: 2},
internalTesting.Property{Name: e2e.DoubleArgLevel, Min: 0, Max: 10, Interval: 2},
),
wantPageCount: 1,
},
{
// Test inclusive filters and paging works as expected
description: "expects response with 15 tickets with e2e.FloatRangeDoubleArg1=2,4,6 and e2e.FloatRangeDoubleArg2=[0,10)",
gotTickets: internalTesting.GenerateFloatRangeTickets(
internalTesting.Property{Name: e2e.DoubleArgMMR, Min: 0, Max: 10, Interval: 2},
internalTesting.Property{Name: e2e.DoubleArgLevel, Min: 0, Max: 10, Interval: 2},
),
pool: &pb.Pool{
DoubleRangeFilters: []*pb.DoubleRangeFilter{{
DoubleArg: e2e.DoubleArgMMR,
Min: 2,
Max: 6,
}},
},
wantCode: codes.OK,
wantTickets: internalTesting.GenerateFloatRangeTickets(
internalTesting.Property{Name: e2e.DoubleArgMMR, Min: 2, Max: 7, Interval: 2},
internalTesting.Property{Name: e2e.DoubleArgLevel, Min: 0, Max: 10, Interval: 2},
),
wantPageCount: 2,
},
{
description: "expects 1 ticket with tag e2eTesting.ModeDemo",
gotTickets: []*pb.Ticket{
{
SearchFields: &pb.SearchFields{
Tags: []string{
e2eTesting.ModeDemo,
},
},
},
{
SearchFields: &pb.SearchFields{
Tags: []string{
"Foo",
},
},
},
},
pool: &pb.Pool{
TagPresentFilters: []*pb.TagPresentFilter{{
Tag: e2e.ModeDemo,
}},
},
wantCode: codes.OK,
wantTickets: []*pb.Ticket{
{
SearchFields: &pb.SearchFields{
Tags: []string{
e2eTesting.ModeDemo,
},
},
},
},
wantPageCount: 1,
},
{
// Test StringEquals works as expected
description: "expects 1 ticket with property e2eTesting.Role maps to warrior",
gotTickets: []*pb.Ticket{
{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
e2eTesting.Role: "warrior",
},
},
},
{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
e2eTesting.Role: "rogue",
},
},
},
},
pool: &pb.Pool{
StringEqualsFilters: []*pb.StringEqualsFilter{
{
StringArg: e2e.Role,
Value: "warrior",
},
},
},
wantCode: codes.OK,
wantTickets: []*pb.Ticket{
{
q := om.MustQueryServiceGRPC()
{
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: nil})
require.Nil(t, err)
resp, err := stream.Recv()
require.Equal(t, codes.InvalidArgument, status.Convert(err).Code())
require.Nil(t, resp)
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
e2eTesting.Role: "warrior",
},
},
},
},
wantPageCount: 1,
},
{
// Test all tickets
description: "expects all 3 tickets when passing in a pool with no filters",
gotTickets: []*pb.Ticket{
{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
e2eTesting.Role: "warrior",
},
},
},
{
SearchFields: &pb.SearchFields{
Tags: []string{
e2eTesting.ModeDemo,
},
},
},
{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
e2eTesting.DoubleArgMMR: 100,
},
},
},
},
pool: &pb.Pool{},
wantCode: codes.OK,
wantTickets: []*pb.Ticket{
{
SearchFields: &pb.SearchFields{
StringArgs: map[string]string{
e2eTesting.Role: "warrior",
},
},
},
{
SearchFields: &pb.SearchFields{
Tags: []string{
e2eTesting.ModeDemo,
},
},
},
{
SearchFields: &pb.SearchFields{
DoubleArgs: map[string]float64{
e2eTesting.DoubleArgMMR: 100,
},
},
},
},
wantPageCount: 1,
},
}
{
stream, err := q.QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: nil})
require.Nil(t, err)
t.Run("TestQueryTickets", func(t *testing.T) {
for _, test := range tests {
test := test
t.Run(test.description, func(t *testing.T) {
t.Parallel()
resp, err := stream.Recv()
require.Equal(t, codes.InvalidArgument, status.Convert(err).Code())
require.Nil(t, resp)
}
}
om, closer := e2e.New(t)
defer closer()
fe := om.MustFrontendGRPC()
mml := om.MustQueryServiceGRPC()
pageCounts := 0
ctx := om.Context()
func TestNoTickets(t *testing.T) {
om := e2e.New(t)
for _, ticket := range test.gotTickets {
resp, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: ticket})
assert.NotNil(t, resp)
assert.Nil(t, err)
}
q := om.MustQueryServiceGRPC()
stream, err := mml.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: test.pool})
assert.Nil(t, err)
{
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
require.Nil(t, err)
var actualTickets []*pb.Ticket
resp, err := stream.Recv()
require.Equal(t, io.EOF, err)
require.Nil(t, resp)
}
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
assert.Equal(t, test.wantCode, status.Convert(err).Code())
break
}
{
stream, err := q.QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: &pb.Pool{}})
require.Nil(t, err)
actualTickets = append(actualTickets, resp.Tickets...)
pageCounts++
}
resp, err := stream.Recv()
require.Equal(t, io.EOF, err)
require.Nil(t, resp)
}
}
func TestPaging(t *testing.T) {
om := e2e.New(t)
pageSize := 10 // TODO: read from config
if pageSize < 1 {
require.Fail(t, "invalid page size")
}
totalTickets := pageSize*5 + 1
expectedIds := map[string]struct{}{}
fe := om.MustFrontendGRPC()
for i := 0; i < totalTickets; i++ {
resp, err := fe.CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
require.NotNil(t, resp)
require.Nil(t, err)
expectedIds[resp.Id] = struct{}{}
}
q := om.MustQueryServiceGRPC()
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
require.Nil(t, err)
foundIds := map[string]struct{}{}
for i := 0; i < 5; i++ {
var resp *pb.QueryTicketsResponse
resp, err = stream.Recv()
require.Nil(t, err)
require.Equal(t, len(resp.Tickets), pageSize)
for _, ticket := range resp.Tickets {
foundIds[ticket.Id] = struct{}{}
require.Equal(t, len(test.wantTickets), len(actualTickets))
// Test field by field because of the randomness of the ticket ids...
// TODO: this makes testing overcomplicated. We should figure out a way to avoid the randomness.
// This for loop also relies on the fact that the redis range query and the ticket generator both return tickets in sorted order.
// If this fact changes, we might need an ugly nested for loop to do the validity checks.
for i := 0; i < len(actualTickets); i++ {
assert.Equal(t, test.wantTickets[i].GetAssignment(), actualTickets[i].GetAssignment())
assert.Equal(t, test.wantTickets[i].GetSearchFields(), actualTickets[i].GetSearchFields())
}
assert.Equal(t, test.wantPageCount, pageCounts)
})
}
}
resp, err := stream.Recv()
require.Nil(t, err)
require.Equal(t, len(resp.Tickets), 1)
foundIds[resp.Tickets[0].Id] = struct{}{}
require.Equal(t, expectedIds, foundIds)
resp, err = stream.Recv()
require.Equal(t, err, io.EOF)
require.Nil(t, resp)
}
func TestTicketFound(t *testing.T) {
for _, tc := range testcases.IncludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
if !returnedByQuery(t, tc) {
require.Fail(t, "Expected to find ticket in pool but didn't.")
}
if !returnedByQueryID(t, tc) {
require.Fail(t, "Expected to find id in pool but didn't.")
}
})
}
}
func TestTicketNotFound(t *testing.T) {
for _, tc := range testcases.ExcludedTestCases() {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
if returnedByQuery(t, tc) {
require.Fail(t, "Expected to not find ticket in pool but did.")
}
if returnedByQueryID(t, tc) {
require.Fail(t, "Expected to not find id in pool but did.")
}
})
}
}
func returnedByQuery(t *testing.T, tc testcases.TestCase) (found bool) {
om := e2e.New(t)
{
fe := om.MustFrontendGRPC()
resp, err := fe.CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
require.NotNil(t, resp)
require.Nil(t, err)
}
q := om.MustQueryServiceGRPC()
stream, err := q.QueryTickets(context.Background(), &pb.QueryTicketsRequest{Pool: tc.Pool})
require.Nil(t, err)
tickets := []*pb.Ticket{}
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
require.Nil(t, err)
tickets = append(tickets, resp.Tickets...)
}
if len(tickets) > 1 {
require.Fail(t, "More than one ticket found")
}
return len(tickets) == 1
}
func returnedByQueryID(t *testing.T, tc testcases.TestCase) (found bool) {
om := e2e.New(t)
{
fe := om.MustFrontendGRPC()
resp, err := fe.CreateTicket(context.Background(), &pb.CreateTicketRequest{Ticket: tc.Ticket})
require.NotNil(t, resp)
require.Nil(t, err)
}
q := om.MustQueryServiceGRPC()
stream, err := q.QueryTicketIds(context.Background(), &pb.QueryTicketIdsRequest{Pool: tc.Pool})
require.Nil(t, err)
ids := []string{}
for {
resp, err := stream.Recv()
if err == io.EOF {
break
}
require.Nil(t, err)
ids = append(ids, resp.GetIds()...)
}
if len(ids) > 1 {
require.Fail(t, "More than one ticket found")
}
return len(ids) == 1
})
}

@ -18,11 +18,9 @@ package e2e
import (
"context"
"io"
"testing"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -31,44 +29,8 @@ import (
)
func TestAssignTickets(t *testing.T) {
om := e2e.New(t)
fe := om.MustFrontendGRPC()
be := om.MustBackendGRPC()
ctx := om.Context()
t1, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
assert.Nil(t, err)
t2, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
assert.Nil(t, err)
req := &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{t1.Id},
Assignment: &pb.Assignment{Connection: "a"},
},
{
TicketIds: []string{t2.Id},
Assignment: &pb.Assignment{Connection: "b"},
},
},
}
resp, err := be.AssignTickets(ctx, req)
assert.Nil(t, err)
assert.Equal(t, &pb.AssignTicketsResponse{}, resp)
get, err := fe.GetTicket(ctx, &pb.GetTicketRequest{TicketId: t1.Id})
assert.Nil(t, err)
assert.Equal(t, "a", get.Assignment.Connection)
get, err = fe.GetTicket(ctx, &pb.GetTicketRequest{TicketId: t2.Id})
assert.Nil(t, err)
assert.Equal(t, "b", get.Assignment.Connection)
}
func TestAssignTicketsInvalidArgument(t *testing.T) {
om := e2e.New(t)
om, closer := e2e.New(t)
defer closer()
fe := om.MustFrontendGRPC()
be := om.MustBackendGRPC()
ctx := om.Context()
@ -76,102 +38,79 @@ func TestAssignTicketsInvalidArgument(t *testing.T) {
ctResp, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
assert.Nil(t, err)
for _, tt := range []struct {
name string
req *pb.AssignTicketsRequest
msg string
var tt = []struct {
description string
ticketIds []string
assignment *pb.Assignment
wantAssignment *pb.Assignment
wantCode codes.Code
}{
{
"missing assignment",
&pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{},
},
},
"AssignmentGroup.Assignment is required",
"expects invalid argument code since request is empty",
nil,
nil,
nil,
codes.InvalidArgument,
},
{
"ticket used twice one group",
&pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{ctResp.Id, ctResp.Id},
Assignment: &pb.Assignment{},
},
},
},
"Ticket id " + ctResp.Id + " is assigned multiple times in one assign tickets call.",
"expects invalid argument code since assignment is nil",
[]string{"1"},
nil,
nil,
codes.InvalidArgument,
},
{
"ticket used twice two groups",
&pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{ctResp.Id},
Assignment: &pb.Assignment{Connection: "a"},
},
{
TicketIds: []string{ctResp.Id},
Assignment: &pb.Assignment{Connection: "b"},
},
},
},
"Ticket id " + ctResp.Id + " is assigned multiple times in one assign tickets call.",
"expects not found code since ticket id does not exist in the statestore",
[]string{"2"},
&pb.Assignment{Connection: "localhost"},
nil,
codes.NotFound,
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
_, err := be.AssignTickets(ctx, tt.req)
assert.Equal(t, codes.InvalidArgument, status.Convert(err).Code())
assert.Equal(t, tt.msg, status.Convert(err).Message())
})
}
}
func TestAssignTicketsMissingTicket(t *testing.T) {
om := e2e.New(t)
fe := om.MustFrontendGRPC()
be := om.MustBackendGRPC()
ctx := om.Context()
t1, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
assert.Nil(t, err)
t2, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
assert.Nil(t, err)
t3, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
assert.Nil(t, err)
_, err = fe.DeleteTicket(ctx, &pb.DeleteTicketRequest{TicketId: t2.Id})
assert.Nil(t, err)
req := &pb.AssignTicketsRequest{
Assignments: []*pb.AssignmentGroup{
{
TicketIds: []string{t1.Id, t2.Id, t3.Id},
Assignment: &pb.Assignment{Connection: "a"},
},
{
"expects not found code since ticket id 'unknown id' does not exist in the statestore",
[]string{ctResp.GetTicket().GetId(), "unknown id"},
&pb.Assignment{Connection: "localhost"},
nil,
codes.NotFound,
},
{
"expects ok code",
[]string{ctResp.GetTicket().GetId()},
&pb.Assignment{Connection: "localhost"},
&pb.Assignment{Connection: "localhost"},
codes.OK,
},
}
resp, err := be.AssignTickets(ctx, req)
assert.Nil(t, err)
assert.Equal(t, &pb.AssignTicketsResponse{
Failures: []*pb.AssignmentFailure{
{
TicketId: t2.Id,
Cause: pb.AssignmentFailure_TICKET_NOT_FOUND,
},
},
}, resp)
t.Run("TestAssignTickets", func(t *testing.T) {
for _, test := range tt {
test := test
t.Run(test.description, func(t *testing.T) {
t.Parallel()
ctx := om.Context()
_, err := be.AssignTickets(ctx, &pb.AssignTicketsRequest{TicketIds: test.ticketIds, Assignment: test.assignment})
assert.Equal(t, test.wantCode, status.Convert(err).Code())
// If AssignTickets succeeds, validate the assignment
if err == nil {
for _, id := range test.ticketIds {
gtResp, err := fe.GetTicket(ctx, &pb.GetTicketRequest{TicketId: id})
assert.Nil(t, err)
// grpc will write something to the reserved fields of this protobuf object, so we have to compare field by field.
assert.Equal(t, test.wantAssignment.GetConnection(), gtResp.GetAssignment().GetConnection())
}
}
})
}
})
}
// TestTicketLifeCycle tests creating, getting and deleting a ticket using Frontend service.
func TestTicketLifeCycle(t *testing.T) {
assert := assert.New(t)
om := e2e.New(t)
om, closer := e2e.New(t)
defer closer()
fe := om.MustFrontendGRPC()
assert.NotNil(fe)
ctx := om.Context()
@ -182,15 +121,20 @@ func TestTicketLifeCycle(t *testing.T) {
"test-property": 1,
},
},
Assignment: &pb.Assignment{
Connection: "test-tbd",
},
}
// Create a ticket, validate that it got an id and set its id in the expected ticket.
createResp, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: ticket})
assert.NotNil(createResp)
assert.NotNil(createResp.GetId())
resp, err := fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: ticket})
assert.NotNil(resp)
assert.Nil(err)
ticket.Id = createResp.GetId()
validateTicket(t, createResp, ticket)
want := resp.Ticket
assert.NotNil(want)
assert.NotNil(want.GetId())
ticket.Id = want.GetId()
validateTicket(t, resp.GetTicket(), ticket)
// Fetch the ticket and validate that it is identical to the expected ticket.
gotTicket, err := fe.GetTicket(ctx, &pb.GetTicketRequest{TicketId: ticket.GetId()})
@ -233,156 +177,3 @@ func validateDelete(ctx context.Context, t *testing.T, fe pb.FrontendServiceClie
assert.Failf(t, "ticket %v not deleted after 5 seconds", id)
}
func TestEmptyReleaseTicketsRequest(t *testing.T) {
om := e2e.New(t)
be := om.MustBackendGRPC()
ctx := om.Context()
resp, err := be.ReleaseTickets(ctx, &pb.ReleaseTicketsRequest{
TicketIds: nil,
})
assert.Nil(t, err)
assert.Equal(t, &pb.ReleaseTicketsResponse{}, resp)
}
func TestReleaseTickets(t *testing.T) {
om := e2e.New(t)
fe := om.MustFrontendGRPC()
be := om.MustBackendGRPC()
q := om.MustQueryServiceGRPC()
ctx := om.Context()
var ticket *pb.Ticket
{ // Create ticket
var err error
ticket, err = fe.CreateTicket(ctx, &pb.CreateTicketRequest{Ticket: &pb.Ticket{}})
assert.Nil(t, err)
assert.NotEmpty(t, ticket.Id)
}
{ // Ticket present in query
stream, err := q.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
assert.Nil(t, err)
resp, err := stream.Recv()
assert.Nil(t, err)
assert.Equal(t, &pb.QueryTicketsResponse{
Tickets: []*pb.Ticket{ticket},
}, resp)
resp, err = stream.Recv()
assert.Equal(t, io.EOF, err)
assert.Nil(t, resp)
}
{ // Ticket returned from match
stream, err := be.FetchMatches(ctx, &pb.FetchMatchesRequest{
Config: om.MustMmfConfigGRPC(),
Profile: &pb.MatchProfile{
Name: "test-profile",
Pools: []*pb.Pool{
{Name: "pool"},
},
},
})
assert.Nil(t, err)
resp, err := stream.Recv()
assert.Nil(t, err)
assert.Len(t, resp.Match.Tickets, 1)
assert.Equal(t, ticket, resp.Match.Tickets[0])
resp, err = stream.Recv()
assert.Equal(t, io.EOF, err)
assert.Nil(t, resp)
}
{ // Ticket NOT present in query
stream, err := q.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
assert.Nil(t, err)
resp, err := stream.Recv()
assert.Equal(t, io.EOF, err)
assert.Nil(t, resp)
}
{ // Return ticket
resp, err := be.ReleaseTickets(ctx, &pb.ReleaseTicketsRequest{
TicketIds: []string{ticket.Id},
})
assert.Nil(t, err)
assert.Equal(t, &pb.ReleaseTicketsResponse{}, resp)
}
{ // Ticket present in query
stream, err := q.QueryTickets(ctx, &pb.QueryTicketsRequest{Pool: &pb.Pool{}})
assert.Nil(t, err)
resp, err := stream.Recv()
assert.Nil(t, err)
assert.Equal(t, &pb.QueryTicketsResponse{
Tickets: []*pb.Ticket{ticket},
}, resp)
resp, err = stream.Recv()
assert.Equal(t, io.EOF, err)
assert.Nil(t, resp)
}
}
func TestCreateTicketErrors(t *testing.T) {
for _, tt := range []struct {
name string
req *pb.CreateTicketRequest
code codes.Code
msg string
}{
{
"missing ticket",
&pb.CreateTicketRequest{
Ticket: nil,
},
codes.InvalidArgument,
".ticket is required",
},
{
"already has assignment",
&pb.CreateTicketRequest{
Ticket: &pb.Ticket{
Assignment: &pb.Assignment{},
},
},
codes.InvalidArgument,
"tickets cannot be created with an assignment",
},
{
"already has create time",
&pb.CreateTicketRequest{
Ticket: &pb.Ticket{
CreateTime: ptypes.TimestampNow(),
},
},
codes.InvalidArgument,
"tickets cannot be created with create time set",
},
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
om := e2e.New(t)
fe := om.MustFrontendGRPC()
ctx := om.Context()
resp, err := fe.CreateTicket(ctx, tt.req)
assert.Nil(t, resp)
s := status.Convert(err)
assert.Equal(t, tt.code, s.Code())
assert.Equal(t, s.Message(), tt.msg)
})
}
}

test/evaluator/Dockerfile Normal file

@ -0,0 +1,24 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM open-match-base-build as builder
WORKDIR /go/src/open-match.dev/open-match/test/evaluator
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o evaluator .
FROM gcr.io/distroless/static:nonroot
WORKDIR /app/
COPY --from=builder --chown=nonroot /go/src/open-match.dev/open-match/test/evaluator/evaluator /app/
ENTRYPOINT ["/app/evaluator"]

@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package defaulteval provides a simple score based evaluator.
package defaulteval
package evaluate
import (
"math"
@ -21,7 +20,7 @@ import (
"github.com/golang/protobuf/ptypes"
"github.com/sirupsen/logrus"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/testing/evaluator"
"open-match.dev/open-match/pkg/pb"
)
@ -37,8 +36,8 @@ type matchInp struct {
inp *pb.DefaultEvaluationCriteria
}
// Evaluate sorts the matches by DefaultEvaluationCriteria.Score (optional),
// then returns matches which don't collide with previously returned matches.
// Evaluate is where your custom evaluation logic lives.
// This sample evaluator sorts and deduplicates the input matches.
func Evaluate(p *evaluator.Params) ([]string, error) {
matches := make([]*matchInp, 0, len(p.Matches))
nilEvlautionInputs := 0

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package defaulteval
package evaluate
import (
"testing"
@ -21,7 +21,7 @@ import (
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/stretchr/testify/assert"
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/testing/evaluator"
"open-match.dev/open-match/pkg/pb"
)

@ -14,11 +14,11 @@
package main
import (
"open-match.dev/open-match/internal/app/evaluator"
"open-match.dev/open-match/internal/app/evaluator/defaulteval"
"open-match.dev/open-match/internal/testing/evaluator"
"open-match.dev/open-match/test/evaluator/evaluate"
)
func main() {
// Invoke the harness to setup a GRPC service that handles requests to run the evaluator.
evaluator.RunEvaluator(defaulteval.Evaluate)
evaluator.RunEvaluator(evaluate.Evaluate)
}

@ -20,11 +20,10 @@
package mmf
import (
"sort"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/pkg/errors"
"github.com/rs/xid"
internalMmf "open-match.dev/open-match/internal/testing/mmf"
"open-match.dev/open-match/pkg/pb"
)
@ -42,11 +41,22 @@ func MakeMatches(params *internalMmf.MatchFunctionParams) ([]*pb.Match, error) {
var result []*pb.Match
for _, tickets := range params.PoolNameToTickets {
if len(tickets) != 0 {
m, err := MakeMatch(params.ProfileName, tickets...)
evaluationInput, err := ptypes.MarshalAny(&pb.DefaultEvaluationCriteria{
Score: scoreCalculator(tickets),
})
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Failed to marshal DefaultEvaluationCriteria.")
}
result = append(result, m)
result = append(result, &pb.Match{
MatchId: xid.New().String(),
MatchProfile: params.ProfileName,
MatchFunction: matchName,
Tickets: tickets,
Extensions: map[string]*any.Any{
"evaluation_input": evaluationInput,
},
})
}
}
@ -65,33 +75,3 @@ func scoreCalculator(tickets []*pb.Ticket) float64 {
}
return matchScore
}
// MakeMatch creates a match given the provided tickets.
func MakeMatch(profileName string, tickets ...*pb.Ticket) (*pb.Match, error) {
// Keep output deterministic
sort.Slice(tickets, func(i, j int) bool {
return tickets[i].Id < tickets[j].Id
})
evaluationInput, err := ptypes.MarshalAny(&pb.DefaultEvaluationCriteria{
Score: scoreCalculator(tickets),
})
if err != nil {
return nil, errors.Wrap(err, "Failed to marshal DefaultEvaluationCriteria.")
}
id := "m"
for _, t := range tickets {
id += t.Id
}
return &pb.Match{
MatchId: id,
MatchProfile: profileName,
MatchFunction: matchName,
Tickets: tickets,
Extensions: map[string]*any.Any{
"evaluation_input": evaluationInput,
},
}, nil
}
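
// For reference, a minimal sketch of the other side of the "evaluation_input" extension that
// MakeMatches attaches above: how an evaluator could unpack DefaultEvaluationCriteria and order
// matches by score before resolving ticket collisions. This is not part of the diff; the helper
// names scoreOf and sortByScore are hypothetical, while pb and ptypes are the packages already
// used in this file.
package evaluate

import (
	"sort"

	"github.com/golang/protobuf/ptypes"
	"open-match.dev/open-match/pkg/pb"
)

// scoreOf unpacks DefaultEvaluationCriteria from a match's "evaluation_input" extension,
// returning 0 when the extension is absent or cannot be unmarshaled.
func scoreOf(m *pb.Match) float64 {
	in, ok := m.GetExtensions()["evaluation_input"]
	if !ok {
		return 0
	}
	criteria := &pb.DefaultEvaluationCriteria{}
	if err := ptypes.UnmarshalAny(in, criteria); err != nil {
		return 0
	}
	return criteria.GetScore()
}

// sortByScore orders matches from highest to lowest score, so an evaluator walking the slice
// can accept higher-scored matches before any lower-scored matches that reuse the same tickets.
func sortByScore(matches []*pb.Match) {
	sort.Slice(matches, func(i, j int) bool {
		return scoreOf(matches[i]) > scoreOf(matches[j])
	})
}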

@ -93,6 +93,11 @@ func TestMakeMatches(t *testing.T) {
}
matchGen := func(tickets []*pb.Ticket) *pb.Match {
tids := []string{}
for _, ticket := range tickets {
tids = append(tids, ticket.GetId())
}
evaluationInput, err := ptypes.MarshalAny(&pb.DefaultEvaluationCriteria{
Score: scoreCalculator(tickets),
})

Some files were not shown because too many files have changed in this diff.